From 85c675d0d09a45a135bddd15d7b385f8758c32fb Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 18 May 2024 19:35:05 +0200
Subject: Adding upstream version 6.7.7.

Signed-off-by: Daniel Baumann
---
 drivers/gpu/drm/amd/amdgpu/Makefile | 25 +-
 drivers/gpu/drm/amd/amdgpu/aldebaran.c | 65 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 116 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 72 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 58 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c | 2 +-
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 40 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 48 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 136 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 20 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 17 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 18 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 11 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 12 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 10 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 624 +++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 189 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 28 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h | 7 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 175 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c | 10 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c | 166 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h | 11 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 8 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 42 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 15 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 238 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 32 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 45 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 207 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 352 ++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h | 115 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 42 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 10 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 54 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 169 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 5 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 625 +++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 45 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 12 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 1 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c | 92 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h | 31 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 16 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 5 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 30 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 1 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h | 12 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 12 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 99 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 37 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 35 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 5 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c | 878 ++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h | 228 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 35 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 25 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 15 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 50 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 23 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 12 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 92 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 42 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 10 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c | 656 ++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h | 91 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 90 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 28 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 243 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h | 5 +-
 drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/athub_v1_0.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/athub_v2_0.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/athub_v2_1.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/athub_v3_0.c | 7 +-
 drivers/gpu/drm/amd/amdgpu/atom.c | 19 +-
 drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 1 -
 drivers/gpu/drm/amd/amdgpu/cik.c | 4 -
 drivers/gpu/drm/amd/amdgpu/cik_ih.c | 6 +
 drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 16 +-
 drivers/gpu/drm/amd/amdgpu/cz_ih.c | 5 +
 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 6 +-
 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 6 +-
 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 6 +-
 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 6 +-
 drivers/gpu/drm/amd/amdgpu/df_v4_6_2.c | 34 +
 drivers/gpu/drm/amd/amdgpu/df_v4_6_2.h | 31 +
 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 153 +-
 drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 184 +-
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 97 +-
 drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 6 +-
 drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 181 +-
 drivers/gpu/drm/amd/amdgpu/gfxhub_v11_5_0.c | 516 ++++++
 drivers/gpu/drm/amd/amdgpu/gfxhub_v11_5_0.h | 29 +
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 6 +-
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 20 +-
 drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c | 5 +-
 drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c | 6 +-
 drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c | 5 +-
 drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c | 5 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 275 +--
 drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 220 ++-
 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 11 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 37 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 38 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 438 +++--
 drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c | 27 +-
 drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c | 20 +-
 drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 5 +
 drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 10 +-
 drivers/gpu/drm/amd/amdgpu/ih_v6_1.c | 7 +
 drivers/gpu/drm/amd/amdgpu/imu_v11_0.c | 9 +-
 drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 4 +-
 drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 14 +-
 drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 21 +-
 drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 623 +++++++
 drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.h | 35 +
 drivers/gpu/drm/amd/amdgpu/mes_v10_1.c | 10 +-
 drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 62 +-
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 6 +-
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 160 +-
 drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 14 +-
 drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c | 4 +-
 drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c | 7 +-
 drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c | 7 +-
 drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c | 5 +-
 drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c | 589 +++++++
 drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.h | 29 +
 drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h | 12 +-
 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 6 +-
 drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 14 +-
 drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c | 6 +-
 drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c | 372 ++++
 drivers/gpu/drm/amd/amdgpu/nbio_v7_11.h | 33 +
 drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c | 10 +-
 drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 11 +-
 drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c | 4 +-
 drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 25 +-
 drivers/gpu/drm/amd/amdgpu/nv.c | 20 +-
 drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 4 +
 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 7 +-
 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 99 +-
 drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 16 +-
 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 16 +-
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 75 +-
 drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 62 +-
 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 25 +-
 drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 30 +-
 drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 115 +-
 drivers/gpu/drm/amd/amdgpu/si.c | 2 -
 drivers/gpu/drm/amd/amdgpu/si_dma.c | 5 -
 drivers/gpu/drm/amd/amdgpu/si_ih.c | 6 +
 drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c | 22 +-
 drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c | 19 +-
 drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c | 22 +
 drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c | 6 +
 drivers/gpu/drm/amd/amdgpu/soc15.c | 55 +-
 drivers/gpu/drm/amd/amdgpu/soc15_common.h | 14 +-
 drivers/gpu/drm/amd/amdgpu/soc21.c | 54 +-
 drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h | 62 +-
 drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 6 +
 drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 389 +++++
 drivers/gpu/drm/amd/amdgpu/umc_v12_0.h | 130 ++
 drivers/gpu/drm/amd/amdgpu/umc_v8_10.c | 12 +-
 drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c | 424 +++++
 drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.h | 30 +
 drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c | 10 +-
 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 10 +-
 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 10 +-
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 10 +-
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 12 +-
 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 2 -
 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 2 -
 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 5 -
 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 9 +-
 drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 14 +-
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 80 +-
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 5 +
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 1779 ++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.h | 35 +
 drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 6 +
 drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 20 +-
 drivers/gpu/drm/amd/amdgpu/vi.c | 5 +-
 drivers/gpu/drm/amd/amdgpu/vpe_6_1_fw_if.h | 217 +++
 drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c | 291 ++++
 drivers/gpu/drm/amd/amdgpu/vpe_v6_1.h | 29 +
 209 files changed, 12883 insertions(+), 2775 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h
 create mode 100644 drivers/gpu/drm/amd/amdgpu/df_v4_6_2.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/df_v4_6_2.h
 create mode 100644 drivers/gpu/drm/amd/amdgpu/gfxhub_v11_5_0.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/gfxhub_v11_5_0.h
 create mode 100644 drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.h
 create mode 100644 drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.h
 create mode 100644 drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/nbio_v7_11.h
 create mode 100644 drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
 create mode 100644 drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.h
 create mode 100644 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.h
 create mode 100644 drivers/gpu/drm/amd/amdgpu/vpe_6_1_fw_if.h
 create mode 100644 drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/vpe_v6_1.h

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 384b798a9..2afecc550 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -98,13 +98,14 @@ amdgpu-y += \
 	vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
 	nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o \
 	sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
-	nbio_v7_9.o aqua_vanjaram.o
+	nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o

 # add DF block
 amdgpu-y += \
 	df_v1_7.o \
 	df_v3_6.o \
-	df_v4_3.o
+	df_v4_3.o \
+	df_v4_6_2.o

 # add GMC block
 amdgpu-y += \
@@ -113,11 +114,12 @@ amdgpu-y += \
 	gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o mmhub_v9_4.o \
 	gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o gfxhub_v2_1.o mmhub_v2_3.o \
 	mmhub_v1_7.o gfxhub_v3_0.o mmhub_v3_0.o mmhub_v3_0_2.o gmc_v11_0.o \
-	mmhub_v3_0_1.o gfxhub_v3_0_3.o gfxhub_v1_2.o mmhub_v1_8.o
+	mmhub_v3_0_1.o gfxhub_v3_0_3.o gfxhub_v1_2.o mmhub_v1_8.o mmhub_v3_3.o \
+	gfxhub_v11_5_0.o

 # add UMC block
 amdgpu-y += \
-	umc_v6_0.o umc_v6_1.o umc_v6_7.o umc_v8_7.o umc_v8_10.o
+	umc_v6_0.o umc_v6_1.o umc_v6_7.o umc_v8_7.o umc_v8_10.o umc_v12_0.o

 # add IH block
 amdgpu-y += \
@@ -205,14 +207,27 @@ amdgpu-y += \
 	vcn_v3_0.o \
 	vcn_v4_0.o \
 	vcn_v4_0_3.o \
+	vcn_v4_0_5.o \
 	amdgpu_jpeg.o \
 	jpeg_v1_0.o \
 	jpeg_v2_0.o \
 	jpeg_v2_5.o \
 	jpeg_v3_0.o \
 	jpeg_v4_0.o \
-	jpeg_v4_0_3.o
+	jpeg_v4_0_3.o \
+	jpeg_v4_0_5.o

+# add VPE block
+amdgpu-y += \
+	amdgpu_vpe.o \
+	vpe_v6_1.o
+
+# add UMSCH block
+amdgpu-y += \
+	amdgpu_umsch_mm.o \
+	umsch_mm_v4_0.o
+
+#
 # add ATHUB block
 amdgpu-y += \
 	athub_v1_0.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index 2b97b8a96..576067d66 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -35,7 +35,7 @@ static bool aldebaran_is_mode2_default(struct amdgpu_reset_control *reset_ctl)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;

-	if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&
+	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2) &&
 	     adev->gmc.xgmi.connected_to_cpu))
 		return true;

@@ -48,27 +48,24 @@ aldebaran_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
 {
 	struct amdgpu_reset_handler *handler;
 	struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+	int i;
+
+	if (reset_context->method == AMD_RESET_METHOD_NONE) {
+		if (aldebaran_is_mode2_default(reset_ctl))
+			reset_context->method = AMD_RESET_METHOD_MODE2;
+		else
+			reset_context->method = amdgpu_asic_reset_method(adev);
+	}

 	if (reset_context->method != AMD_RESET_METHOD_NONE) {
 		dev_dbg(adev->dev, "Getting reset handler for method %d\n",
 			reset_context->method);
-		list_for_each_entry(handler, &reset_ctl->reset_handlers,
-				    handler_list) {
+		for_each_handler(i, handler, reset_ctl) {
 			if (handler->reset_method == reset_context->method)
 				return handler;
 		}
 	}

-	if (aldebaran_is_mode2_default(reset_ctl)) {
-		list_for_each_entry(handler, &reset_ctl->reset_handlers,
-				    handler_list) {
-			if (handler->reset_method == AMD_RESET_METHOD_MODE2) {
-				reset_context->method = AMD_RESET_METHOD_MODE2;
-				return handler;
-			}
-		}
-	}
-
 	dev_dbg(adev->dev, "Reset handler not found!\n");

 	return NULL;
@@ -124,9 +121,9 @@ static void aldebaran_async_reset(struct work_struct *work)
 	struct amdgpu_reset_control *reset_ctl =
 		container_of(work, struct amdgpu_reset_control, reset_work);
 	struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+	int i;

-	list_for_each_entry(handler, &reset_ctl->reset_handlers,
-			    handler_list) {
+	for_each_handler(i, handler, reset_ctl) {
 		if (handler->reset_method == reset_ctl->active_reset) {
 			dev_dbg(adev->dev, "Resetting device\n");
 			handler->do_reset(adev);
@@ -157,7 +154,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
 	if (reset_device_list == NULL)
 		return -EINVAL;

-	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&
+	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2) &&
 	    reset_context->hive == NULL) {
 		/* Wrong context, return error */
 		return -EINVAL;
@@ -333,12 +330,13 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
 {
 	struct list_head *reset_device_list = reset_context->reset_device_list;
 	struct amdgpu_device *tmp_adev = NULL;
+	struct amdgpu_ras *con;
 	int r;

 	if (reset_device_list == NULL)
 		return -EINVAL;

-	if (reset_context->reset_req_dev->ip_versions[MP1_HWIP][0] ==
+	if (amdgpu_ip_version(reset_context->reset_req_dev, MP1_HWIP, 0) ==
 	    IP_VERSION(13, 0, 2) &&
 	    reset_context->hive == NULL) {
 		/* Wrong context, return error */
@@ -358,7 +356,30 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
 		 */
 		amdgpu_register_gpu_instance(tmp_adev);

-		/* Resume RAS */
+		/* Resume RAS, ecc_irq */
+		con = amdgpu_ras_get_context(tmp_adev);
+		if (!amdgpu_sriov_vf(tmp_adev) && con) {
+			if (tmp_adev->sdma.ras &&
+			    tmp_adev->sdma.ras->ras_block.ras_late_init) {
+				r = tmp_adev->sdma.ras->ras_block.ras_late_init(tmp_adev,
+						&tmp_adev->sdma.ras->ras_block.ras_comm);
+				if (r) {
+					dev_err(tmp_adev->dev, "SDMA failed to execute ras_late_init! ret:%d\n", r);
+					goto end;
+				}
+			}
+
+			if (tmp_adev->gfx.ras &&
+			    tmp_adev->gfx.ras->ras_block.ras_late_init) {
+				r = tmp_adev->gfx.ras->ras_block.ras_late_init(tmp_adev,
+						&tmp_adev->gfx.ras->ras_block.ras_comm);
+				if (r) {
+					dev_err(tmp_adev->dev, "GFX failed to execute ras_late_init! ret:%d\n", r);
+					goto end;
+				}
+			}
+		}
+
 		amdgpu_ras_resume(tmp_adev);

 		/* Update PSP FW topology after reset */
@@ -395,6 +416,11 @@ static struct amdgpu_reset_handler aldebaran_mode2_handler = {
 	.do_reset		= aldebaran_mode2_reset,
 };

+static struct amdgpu_reset_handler
+	*aldebaran_rst_handlers[AMDGPU_RESET_MAX_HANDLERS] = {
+		&aldebaran_mode2_handler,
+	};
+
 int aldebaran_reset_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_reset_control *reset_ctl;
@@ -408,10 +434,9 @@ int aldebaran_reset_init(struct amdgpu_device *adev)
 	reset_ctl->active_reset = AMD_RESET_METHOD_NONE;
 	reset_ctl->get_reset_handler = aldebaran_get_reset_handler;

-	INIT_LIST_HEAD(&reset_ctl->reset_handlers);
 	INIT_WORK(&reset_ctl->reset_work, reset_ctl->async_reset);
 	/* Only mode2 is handled through reset control now */
-	amdgpu_reset_add_handler(reset_ctl, &aldebaran_mode2_handler);
+	reset_ctl->reset_handlers = &aldebaran_rst_handlers;

 	adev->reset_cntl = reset_ctl;
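The aldebaran hunks above replace the linked list of reset handlers (and its INIT_LIST_HEAD/amdgpu_reset_add_handler bookkeeping) with a static, NULL-terminated array that the reset control simply points at. A minimal sketch of the lookup this enables, assuming for_each_handler() (added to amdgpu_reset.h elsewhere in this patch) walks the array up to AMDGPU_RESET_MAX_HANDLERS and stops at the first NULL slot; find_reset_handler() is a hypothetical helper, not driver code:

    static struct amdgpu_reset_handler *
    find_reset_handler(struct amdgpu_reset_control *reset_ctl,
    		   enum amd_reset_method method)
    {
    	struct amdgpu_reset_handler *handler;
    	int i;

    	for (i = 0; i < AMDGPU_RESET_MAX_HANDLERS; i++) {
    		handler = (*reset_ctl->reset_handlers)[i];
    		if (!handler)
    			break;	/* array is NULL-terminated */
    		if (handler->reset_method == method)
    			return handler;
    	}
    	return NULL;
    }

A fixed array also means the handler walk after a wedged reset can never traverse a corrupted list node.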
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a79d53bdb..31d4b5a2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -79,6 +79,8 @@
 #include "amdgpu_vce.h"
 #include "amdgpu_vcn.h"
 #include "amdgpu_jpeg.h"
+#include "amdgpu_vpe.h"
+#include "amdgpu_umsch_mm.h"
 #include "amdgpu_gmc.h"
 #include "amdgpu_gfx.h"
 #include "amdgpu_sdma.h"
@@ -242,8 +244,11 @@ extern int amdgpu_num_kcq;
 #define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
 extern int amdgpu_vcnfw_log;
 extern int amdgpu_sg_display;
+extern int amdgpu_umsch_mm;
+extern int amdgpu_seamless;

 extern int amdgpu_user_partt_mode;
+extern int amdgpu_agp;

 #define AMDGPU_VM_MAX_NUM_CTX			4096
 #define AMDGPU_SG_THRESHOLD			(256*1024*1024)
@@ -359,9 +364,6 @@ struct amdgpu_ip_block_version {
 	const struct amd_ip_funcs *funcs;
 };

-#define HW_REV(_Major, _Minor, _Rev) \
-	((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev)))
-
 struct amdgpu_ip_block {
 	struct amdgpu_ip_block_status status;
 	const struct amdgpu_ip_block_version *version;
@@ -623,6 +625,9 @@ typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device*, uint64_t, uint32_t);
 typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t);
 typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);

+typedef uint64_t (*amdgpu_rreg64_ext_t)(struct amdgpu_device*, uint64_t);
+typedef void (*amdgpu_wreg64_ext_t)(struct amdgpu_device*, uint64_t, uint64_t);
+
 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);

@@ -654,6 +659,7 @@ enum amd_hw_ip_block_type {
 	JPEG_HWIP = VCN_HWIP,
 	VCN1_HWIP,
 	VCE_HWIP,
+	VPE_HWIP,
 	DF_HWIP,
 	DCE_HWIP,
 	OSSSYS_HWIP,
@@ -673,10 +679,15 @@ enum amd_hw_ip_block_type {
 #define HWIP_MAX_INSTANCE	44

 #define HW_ID_MAX		300
-#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))
-#define IP_VERSION_MAJ(ver) ((ver) >> 16)
-#define IP_VERSION_MIN(ver) (((ver) >> 8) & 0xFF)
-#define IP_VERSION_REV(ver) ((ver) & 0xFF)
+#define IP_VERSION_FULL(mj, mn, rv, var, srev) \
+	(((mj) << 24) | ((mn) << 16) | ((rv) << 8) | ((var) << 4) | (srev))
+#define IP_VERSION(mj, mn, rv) IP_VERSION_FULL(mj, mn, rv, 0, 0)
+#define IP_VERSION_MAJ(ver) ((ver) >> 24)
+#define IP_VERSION_MIN(ver) (((ver) >> 16) & 0xFF)
+#define IP_VERSION_REV(ver) (((ver) >> 8) & 0xFF)
+#define IP_VERSION_VARIANT(ver) (((ver) >> 4) & 0xF)
+#define IP_VERSION_SUBREV(ver) ((ver) & 0xF)
+#define IP_VERSION_MAJ_MIN_REV(ver) ((ver) >> 8)

 struct amdgpu_ip_map_info {
 	/* Map of logical to actual dev instances/mask */
@@ -746,6 +757,7 @@ struct amdgpu_mqd_prop {
 	uint64_t eop_gpu_addr;
 	uint32_t hqd_pipe_priority;
 	uint32_t hqd_queue_priority;
+	bool allow_tunneling;
 	bool hqd_active;
 };

@@ -757,8 +769,19 @@ struct amdgpu_mqd {
 #define AMDGPU_RESET_MAGIC_NUM 64
 #define AMDGPU_MAX_DF_PERFMONS 4
-#define AMDGPU_PRODUCT_NAME_LEN 64
 struct amdgpu_reset_domain;
+struct amdgpu_fru_info;
+
+struct amdgpu_reset_info {
+	/* reset dump register */
+	u32 *reset_dump_reg_list;
+	u32 *reset_dump_reg_value;
+	int num_regs;
+
+#ifdef CONFIG_DEV_COREDUMP
+	struct amdgpu_coredump_info *coredump_info;
+#endif
+};

 /*
  * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
@@ -826,6 +849,8 @@ struct amdgpu_device {
 	amdgpu_wreg_ext_t		pcie_wreg_ext;
 	amdgpu_rreg64_t			pcie_rreg64;
 	amdgpu_wreg64_t			pcie_wreg64;
+	amdgpu_rreg64_ext_t		pcie_rreg64_ext;
+	amdgpu_wreg64_ext_t		pcie_wreg64_ext;
 	/* protects concurrent UVD register access */
 	spinlock_t uvd_ctx_idx_lock;
 	amdgpu_rreg_t			uvd_ctx_rreg;
@@ -946,6 +971,13 @@ struct amdgpu_device {
 	/* jpeg */
 	struct amdgpu_jpeg		jpeg;

+	/* vpe */
+	struct amdgpu_vpe		vpe;
+
+	/* umsch */
+	struct amdgpu_umsch_mm		umsch_mm;
+	bool				enable_umsch_mm;
+
 	/* firmwares */
 	struct amdgpu_firmware		firmware;

@@ -1009,6 +1041,8 @@ struct amdgpu_device {
 	bool				in_s3;
 	bool				in_s4;
 	bool				in_s0ix;
+	/* indicate amdgpu suspension status */
+	bool				suspend_complete;

 	enum pp_mp1_state		mp1_state;
 	struct amdgpu_doorbell_index doorbell_index;
@@ -1033,11 +1067,7 @@ struct amdgpu_device {

 	bool				ucode_sysfs_en;

-	/* Chip product information */
-	char				product_number[20];
-	char				product_name[AMDGPU_PRODUCT_NAME_LEN];
-	char				serial[20];
-
+	struct amdgpu_fru_info		*fru_info;
 	atomic_t			throttling_logging_enabled;
 	struct ratelimit_state		throttling_logging_rs;
 	uint32_t			ras_hw_enabled;
@@ -1063,15 +1093,7 @@ struct amdgpu_device {

 	struct mutex			benchmark_mutex;

-	/* reset dump register */
-	uint32_t			*reset_dump_reg_list;
-	uint32_t			*reset_dump_reg_value;
-	int				num_regs;
-#ifdef CONFIG_DEV_COREDUMP
-	struct amdgpu_task_info		reset_task_info;
-	bool				reset_vram_lost;
-	struct timespec64		reset_time;
-#endif
+	struct amdgpu_reset_info	reset_info;

 	bool				scpm_enabled;
 	uint32_t			scpm_status;
@@ -1082,8 +1104,29 @@ struct amdgpu_device {
 	bool				dc_enabled;
 	/* Mask of active clusters */
 	uint32_t			aid_mask;
+
+	/* Debug */
+	bool				debug_vm;
+	bool				debug_largebar;
+	bool				debug_disable_soft_recovery;
 };

+static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
+					 uint8_t ip, uint8_t inst)
+{
+	/* This considers only major/minor/rev and ignores
+	 * subrevision/variant fields.
+	 */
+	return adev->ip_versions[ip][inst] & ~0xFFU;
+}
+
+static inline uint32_t amdgpu_ip_version_full(const struct amdgpu_device *adev,
+					      uint8_t ip, uint8_t inst)
+{
+	/* This returns full version - major/minor/rev/variant/subrevision */
+	return adev->ip_versions[ip][inst];
+}
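The reworked macros widen the encoded IP version from three fields to five: major now lives in bits [31:24], minor in [23:16], revision in [15:8], with a 4-bit variant and 4-bit subrevision in the low byte. amdgpu_ip_version() masks that low byte off, which is what keeps the many existing IP_VERSION(maj, min, rev) comparisons working unchanged. A standalone worked example (illustrative values only, compiled outside the kernel):

    #include <stdint.h>
    #include <assert.h>

    #define IP_VERSION_FULL(mj, mn, rv, var, srev) \
    	(((mj) << 24) | ((mn) << 16) | ((rv) << 8) | ((var) << 4) | (srev))
    #define IP_VERSION(mj, mn, rv) IP_VERSION_FULL(mj, mn, rv, 0, 0)

    int main(void)
    {
    	uint32_t full = IP_VERSION_FULL(13, 0, 2, 1, 3);	/* 0x0D000213 */

    	assert((full >> 24) == 13);		/* IP_VERSION_MAJ     */
    	assert(((full >> 8) & 0xFF) == 2);	/* IP_VERSION_REV     */
    	assert(((full >> 4) & 0xF) == 1);	/* IP_VERSION_VARIANT */
    	assert((full & 0xF) == 3);		/* IP_VERSION_SUBREV  */

    	/* amdgpu_ip_version() clears variant+subrev, so a discovered
    	 * 13.0.2 part of any variant still matches IP_VERSION(13, 0, 2). */
    	assert((full & ~0xFFU) == IP_VERSION(13, 0, 2));
    	return 0;
    }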

 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
 {
 	return container_of(ddev, struct amdgpu_device, ddev);
@@ -1120,11 +1163,18 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
 			    uint32_t reg, uint32_t acc_flags);
 u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
 				    u64 reg_addr);
+uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
+				uint32_t reg, uint32_t acc_flags,
+				uint32_t xcc_id);
 void amdgpu_device_wreg(struct amdgpu_device *adev,
 			uint32_t reg, uint32_t v,
 			uint32_t acc_flags);
 void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
 				     u64 reg_addr, u32 reg_data);
+void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
+			    uint32_t reg, uint32_t v,
+			    uint32_t acc_flags,
+			    uint32_t xcc_id);
 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
 			     uint32_t reg, uint32_t v, uint32_t xcc_id);
 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
@@ -1134,10 +1184,14 @@ u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
 				u32 reg_addr);
 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
 				  u32 reg_addr);
+u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
+				      u64 reg_addr);
 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
 				 u32 reg_addr, u32 reg_data);
 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
 				   u32 reg_addr, u64 reg_data);
+void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
+				       u64 reg_addr, u64 reg_data);
 u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
@@ -1161,8 +1215,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
 #define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)

-#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg))
-#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v))
+#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg), 0)
+#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v), 0)

 #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
 #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
@@ -1172,6 +1226,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
+#define RREG32_XCC(reg, inst) amdgpu_device_xcc_rreg(adev, (reg), 0, inst)
+#define WREG32_XCC(reg, v, inst) amdgpu_device_xcc_wreg(adev, (reg), (v), 0, inst)
 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
 #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
 #define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
@@ -1180,6 +1236,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define WREG32_PCIE_EXT(reg, v) adev->pcie_wreg_ext(adev, (reg), (v))
 #define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg))
 #define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v))
+#define RREG64_PCIE_EXT(reg) adev->pcie_rreg64_ext(adev, (reg))
+#define WREG64_PCIE_EXT(reg, v) adev->pcie_wreg64_ext(adev, (reg), (v))
 #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
 #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
 #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
@@ -1275,15 +1333,13 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 	((adev)->asic_funcs->update_umd_stable_pstate ? (adev)->asic_funcs->update_umd_stable_pstate((adev), (enter)) : 0)
 #define amdgpu_asic_query_video_codecs(adev, e, c) (adev)->asic_funcs->query_video_codecs((adev), (e), (c))

-#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
+#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter))

 #define BIT_MASK_UPPER(i) ((i) >= BITS_PER_LONG ? 0 : ~0UL << (i))
 #define for_each_inst(i, inst_mask)        \
 	for (i = ffs(inst_mask); i-- != 0; \
 	     i = ffs(inst_mask & BIT_MASK_UPPER(i + 1)))

-#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
-
 /* Common functions */
 bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
@@ -1293,9 +1349,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
-bool amdgpu_device_pcie_dynamic_switching_supported(void);
+bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev);
 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
-bool amdgpu_device_aspm_support_quirk(void);

 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
 				  u64 num_vis_bytes);
@@ -1367,6 +1422,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 void amdgpu_driver_release_kms(struct drm_device *dev);

 int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
+int amdgpu_device_prepare(struct drm_device *dev);
 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
 int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
 u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
@@ -1455,9 +1511,11 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
 #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
 bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
 bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
 #else
 static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
 static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
+static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
 #endif

 #if defined(CONFIG_DRM_AMD_DC)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 2bca37044..7099ff9cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -68,7 +68,7 @@ struct amdgpu_acpi_xcc_info {
 struct amdgpu_acpi_dev_info {
 	struct list_head list;
 	struct list_head xcc_list;
-	uint16_t bdf;
+	uint32_t sbdf;
 	uint16_t supp_xcp_mode;
 	uint16_t xcp_mode;
 	uint16_t mem_mode;
@@ -927,7 +927,7 @@ static acpi_status amdgpu_acpi_get_node_id(acpi_handle handle,
 #endif
 }

-static struct amdgpu_acpi_dev_info *amdgpu_acpi_get_dev(u16 bdf)
+static struct amdgpu_acpi_dev_info *amdgpu_acpi_get_dev(u32 sbdf)
 {
 	struct amdgpu_acpi_dev_info *acpi_dev;

@@ -935,14 +935,14 @@ static struct amdgpu_acpi_dev_info *amdgpu_acpi_get_dev(u16 bdf)
 		return NULL;

 	list_for_each_entry(acpi_dev, &amdgpu_acpi_dev_list, list)
-		if (acpi_dev->bdf == bdf)
+		if (acpi_dev->sbdf == sbdf)
 			return acpi_dev;

 	return NULL;
 }

 static int amdgpu_acpi_dev_init(struct amdgpu_acpi_dev_info **dev_info,
-				struct amdgpu_acpi_xcc_info *xcc_info, u16 bdf)
+				struct amdgpu_acpi_xcc_info *xcc_info, u32 sbdf)
 {
 	struct amdgpu_acpi_dev_info *tmp;
 	union acpi_object *obj;
@@ -955,7 +955,7 @@ static int amdgpu_acpi_dev_init(struct amdgpu_acpi_dev_info **dev_info,
 	INIT_LIST_HEAD(&tmp->xcc_list);
 	INIT_LIST_HEAD(&tmp->list);
-	tmp->bdf = bdf;
+	tmp->sbdf = sbdf;

 	obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0,
 				      AMD_XCC_DSM_GET_SUPP_MODE, NULL,
@@ -1007,7 +1007,7 @@ static int amdgpu_acpi_dev_init(struct amdgpu_acpi_dev_info **dev_info,

 	DRM_DEBUG_DRIVER(
 		"New dev(%x): Supported xcp mode: %x curr xcp_mode : %x mem mode : %x, tmr base: %llx tmr size: %llx  ",
-		tmp->bdf, tmp->supp_xcp_mode, tmp->xcp_mode, tmp->mem_mode,
+		tmp->sbdf, tmp->supp_xcp_mode, tmp->xcp_mode, tmp->mem_mode,
 		tmp->tmr_base, tmp->tmr_size);
 	list_add_tail(&tmp->list, &amdgpu_acpi_dev_list);
 	*dev_info = tmp;
@@ -1023,7 +1023,7 @@ out:
 }

 static int amdgpu_acpi_get_xcc_info(struct amdgpu_acpi_xcc_info *xcc_info,
-				    u16 *bdf)
+				    u32 *sbdf)
 {
 	union acpi_object *obj;
 	acpi_status status;
@@ -1054,8 +1054,10 @@ static int amdgpu_acpi_get_xcc_info(struct amdgpu_acpi_xcc_info *xcc_info,
 	xcc_info->phy_id = (obj->integer.value >> 32) & 0xFF;
 	/* xcp node of this xcc [47:40] */
 	xcc_info->xcp_node = (obj->integer.value >> 40) & 0xFF;
+	/* PF domain of this xcc [31:16] */
+	*sbdf = (obj->integer.value) & 0xFFFF0000;
 	/* PF bus/dev/fn of this xcc [63:48] */
-	*bdf = (obj->integer.value >> 48) & 0xFFFF;
+	*sbdf |= (obj->integer.value >> 48) & 0xFFFF;
 	ACPI_FREE(obj);
 	obj = NULL;

@@ -1079,7 +1081,7 @@ static int amdgpu_acpi_enumerate_xcc(void)
 	struct acpi_device *acpi_dev;
 	char hid[ACPI_ID_LEN];
 	int ret, id;
-	u16 bdf;
+	u32 sbdf;

 	INIT_LIST_HEAD(&amdgpu_acpi_dev_list);
 	xa_init(&numa_info_xa);
@@ -1107,16 +1109,16 @@ static int amdgpu_acpi_enumerate_xcc(void)
 		xcc_info->handle = acpi_device_handle(acpi_dev);
 		acpi_dev_put(acpi_dev);

-		ret = amdgpu_acpi_get_xcc_info(xcc_info, &bdf);
+		ret = amdgpu_acpi_get_xcc_info(xcc_info, &sbdf);
 		if (ret) {
 			kfree(xcc_info);
 			continue;
 		}

-		dev_info = amdgpu_acpi_get_dev(bdf);
+		dev_info = amdgpu_acpi_get_dev(sbdf);

 		if (!dev_info)
-			ret = amdgpu_acpi_dev_init(&dev_info, xcc_info, bdf);
+			ret = amdgpu_acpi_dev_init(&dev_info, xcc_info, sbdf);

 		if (ret == -ENOMEM)
 			return ret;
@@ -1136,13 +1138,14 @@ int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
 			     u64 *tmr_size)
 {
 	struct amdgpu_acpi_dev_info *dev_info;
-	u16 bdf;
+	u32 sbdf;

 	if (!tmr_offset || !tmr_size)
 		return -EINVAL;

-	bdf = pci_dev_id(adev->pdev);
-	dev_info = amdgpu_acpi_get_dev(bdf);
+	sbdf = (pci_domain_nr(adev->pdev->bus) << 16);
+	sbdf |= pci_dev_id(adev->pdev);
+	dev_info = amdgpu_acpi_get_dev(sbdf);
 	if (!dev_info)
 		return -ENOENT;

@@ -1157,13 +1160,14 @@ int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id,
 {
 	struct amdgpu_acpi_dev_info *dev_info;
 	struct amdgpu_acpi_xcc_info *xcc_info;
-	u16 bdf;
+	u32 sbdf;

 	if (!numa_info)
 		return -EINVAL;

-	bdf = pci_dev_id(adev->pdev);
-	dev_info = amdgpu_acpi_get_dev(bdf);
+	sbdf = (pci_domain_nr(adev->pdev->bus) << 16);
+	sbdf |= pci_dev_id(adev->pdev);
+	dev_info = amdgpu_acpi_get_dev(sbdf);
 	if (!dev_info)
 		return -ENOENT;
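Widening the 16-bit bdf key to a 32-bit sbdf folds the PCI segment (domain) into bits [31:16] above bus/device/function in [15:0], so identical BDFs sitting on different PCI segments no longer collide in the ACPI device list. A sketch of the key construction the lookups above rely on; amdgpu_acpi_sbdf() is a hypothetical helper mirroring the open-coded expression in get_tmr_info()/get_mem_info():

    #include <linux/pci.h>

    /* Segment in the high half, BDF in the low half. */
    static u32 amdgpu_acpi_sbdf(struct pci_dev *pdev)
    {
    	return (pci_domain_nr(pdev->bus) << 16) | pci_dev_id(pdev);
    }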
@@ -1389,14 +1393,11 @@ void amdgpu_acpi_detect(void)
 	struct pci_dev *pdev = NULL;
 	int ret;

-	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
-		if (!atif->handle)
-			amdgpu_atif_pci_probe_handle(pdev);
-		if (!atcs->handle)
-			amdgpu_atcs_pci_probe_handle(pdev);
-	}
+	while ((pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev))) {
+		if ((pdev->class != PCI_CLASS_DISPLAY_VGA << 8) &&
+		    (pdev->class != PCI_CLASS_DISPLAY_OTHER << 8))
+			continue;

-	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
 		if (!atif->handle)
 			amdgpu_atif_pci_probe_handle(pdev);
 		if (!atcs->handle)
@@ -1493,6 +1494,9 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
 	if (adev->asic_type < CHIP_RAVEN)
 		return false;

+	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+		return false;
+
 	/*
 	 * If ACPI_FADT_LOW_POWER_S0 is not set in the FADT, it is generally
 	 * risky to do any special firmware-related preparations for entering
@@ -1515,4 +1519,22 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
 #endif /* CONFIG_AMD_PMC */
 }

+/**
+ * amdgpu_choose_low_power_state
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * Choose the target low power state for the GPU
+ */
+void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
+{
+	if (adev->in_runpm)
+		return;
+
+	if (amdgpu_acpi_is_s0ix_active(adev))
+		adev->in_s0ix = true;
+	else if (amdgpu_acpi_is_s3_active(adev))
+		adev->in_s3 = true;
+}
+
 #endif /* CONFIG_SUSPEND */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 25d5fda5b..3d126f296 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -28,6 +28,7 @@
 #include "amdgpu.h"
 #include "amdgpu_gfx.h"
 #include "amdgpu_dma_buf.h"
+#include <drm/ttm/ttm_tt.h>
 #include <linux/dma-buf.h>
 #include "amdgpu_xgmi.h"
@@ -164,7 +165,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 		 */
 		bitmap_complement(gpu_resources.cp_queue_bitmap,
 				  adev->gfx.mec_bitmap[0].queue_bitmap,
-				  KGD_MAX_QUEUES);
+				  AMDGPU_MAX_QUEUES);

 		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
 		 * nbits is not compile time constant
@@ -172,7 +173,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 		last_valid_bit = 1 /* only first MEC can have compute queues */
 				* adev->gfx.mec.num_pipe_per_mec
 				* adev->gfx.mec.num_queue_per_pipe;
-		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
+		for (i = last_valid_bit; i < AMDGPU_MAX_QUEUES; ++i)
 			clear_bit(i, gpu_resources.cp_queue_bitmap);

 		amdgpu_doorbell_get_kfd_info(adev,
@@ -467,28 +468,6 @@ uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev)
 	return 100;
 }

-void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev, struct kfd_cu_info *cu_info)
-{
-	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;
-
-	memset(cu_info, 0, sizeof(*cu_info));
-	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
-		return;
-
-	cu_info->cu_active_number = acu_info.number;
-	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
-	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
-	       sizeof(cu_info->cu_bitmap));
-	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
-	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
-	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
-	cu_info->simd_per_cu = acu_info.simd_per_cu;
-	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
-	cu_info->wave_front_size = acu_info.wave_front_size;
-	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
-	cu_info->lds_size = acu_info.lds_size;
-}
-
 int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
 				  struct amdgpu_device **dmabuf_adev,
 				  uint64_t *bo_size, void *metadata_buffer,
@@ -568,7 +547,7 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
 	struct amdgpu_device *adev = dst, *peer_adev;
 	int num_links;

-	if (adev->asic_type != CHIP_ALDEBARAN)
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))
 		return 0;

 	if (src)
@@ -704,12 +683,17 @@ err:
 void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
 {
-	/* Temporary workaround to fix issues observed in some
-	 * compute applications when GFXOFF is enabled on GFX11.
-	 */
-	if (IP_VERSION_MAJ(adev->ip_versions[GC_HWIP][0]) == 11) {
+	enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
+	if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
+	    ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) {
 		pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
 		amdgpu_gfx_off_ctrl(adev, idle);
+	} else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
+		   (adev->flags & AMD_IS_APU)) {
+		/* Disable GFXOFF and PG. Temporary workaround
+		 * to fix some compute applications issue on GFX9.
+		 */
+		adev->ip_blocks[AMD_IP_BLOCK_TYPE_GFX].version->funcs->set_powergating_state((void *)adev, state);
 	}
 	amdgpu_dpm_switch_power_profile(adev,
 					PP_SMC_POWER_PROFILE_COMPUTE,
@@ -805,11 +789,23 @@ void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev)

 u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)
 {
-	u64 tmp;
 	s8 mem_id = KFD_XCP_MEM_ID(adev, xcp_id);
+	u64 tmp;

 	if (adev->gmc.num_mem_partitions && xcp_id >= 0 && mem_id >= 0) {
-		tmp = adev->gmc.mem_partitions[mem_id].size;
+		if (adev->gmc.is_app_apu && adev->gmc.num_mem_partitions == 1) {
+			/* In NPS1 mode, we should restrict the vram reporting
+			 * tied to the ttm_pages_limit which is 1/2 of the system
+			 * memory. For other partition modes, the HBM is uniformly
+			 * divided already per numa node reported. If user wants to
+			 * go beyond the default ttm limit and maximize the ROCm
+			 * allocations, they can go up to max ttm and sysmem limits.
+			 */
+
+			tmp = (ttm_tt_pages_limit() << PAGE_SHIFT) / num_online_nodes();
+		} else {
+			tmp = adev->gmc.mem_partitions[mem_id].size;
+		}
 		do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition);
 		return ALIGN_DOWN(tmp, PAGE_SIZE);
 	} else {
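In NPS1 there is a single memory partition, so reporting the partition size directly would advertise the whole HBM carve-out; the hunk above instead derives the figure from the TTM pages limit (which, per the in-code comment, defaults to half of system memory) split evenly across NUMA nodes. A worked example with illustrative numbers only:

    /* Hypothetical APU: 512 GiB system memory, 4 NUMA nodes,
     * 1 XCP per memory partition.
     *
     *   ttm_tt_pages_limit() << PAGE_SHIFT      ~= 256 GiB (totalram / 2)
     *   tmp = 256 GiB / num_online_nodes()       = 64 GiB
     *   do_div(tmp, num_xcp_per_mem_partition)  -> 64 GiB
     *   ALIGN_DOWN(tmp, PAGE_SIZE)               = 64 GiB (already aligned)
     *
     * Each XCP therefore reports 64 GiB instead of the full carve-out.
     */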
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 2fe986072..dac983da9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -235,8 +235,6 @@ void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
 uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev);

 uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev);
-void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev,
-			       struct kfd_cu_info *cu_info);
 int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
 				  struct amdgpu_device **dmabuf_adev,
 				  uint64_t *bo_size, void *metadata_buffer,
@@ -303,6 +301,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
 					  struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
+void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_sync_memory(
 		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
index 469785d33..1ef758ac5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
@@ -90,7 +90,7 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
 		return NULL;

 	fence = container_of(f, struct amdgpu_amdkfd_fence, base);
-	if (fence && f->ops == &amdkfd_fence_ops)
+	if (f->ops == &amdkfd_fence_ops)
 		return fence;

 	return NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
index 490c8f5dd..f6598b9e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
@@ -300,14 +300,13 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd,
 	hqd_end = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_AQL_DISPATCH_ID_HI);

 	for (reg = hqd_base; reg <= hqd_end; reg++)
-		WREG32_RLC(reg, mqd_hqd[reg - hqd_base]);
+		WREG32_XCC(reg, mqd_hqd[reg - hqd_base], inst);

 	/* Activate doorbell logic before triggering WPTR poll. */
 	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
 			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
-	WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_DOORBELL_CONTROL),
-		   data);
+	WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_DOORBELL_CONTROL, data);

 	if (wptr) {
 		/* Don't read wptr with get_user because the user
@@ -336,27 +335,24 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd,
 		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
 		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;

-		WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_LO),
-		       lower_32_bits(guessed_wptr));
-		WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_HI),
-		       upper_32_bits(guessed_wptr));
-		WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_POLL_ADDR),
-		       lower_32_bits((uintptr_t)wptr));
-		WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
-			   regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
-		       upper_32_bits((uintptr_t)wptr));
-		WREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_PQ_WPTR_POLL_CNTL1),
-		       (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id,
-							   queue_id));
+		WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_LO,
+				 lower_32_bits(guessed_wptr));
+		WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_HI,
+				 upper_32_bits(guessed_wptr));
+		WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_POLL_ADDR,
+				 lower_32_bits((uintptr_t)wptr));
+		WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
+				 upper_32_bits((uintptr_t)wptr));
+		WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_PQ_WPTR_POLL_CNTL1,
+				 (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id));
 	}

 	/* Start the EOP fetcher */
-	WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_EOP_RPTR),
-	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
-			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
+	WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_EOP_RPTR,
+			 REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1));

 	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
-	WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_ACTIVE), data);
+	WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_ACTIVE, data);

 	kgd_gfx_v9_release_queue(adev, inst);

@@ -494,15 +490,15 @@ static uint32_t kgd_gfx_v9_4_3_set_address_watch(
 			VALID,
 			1);

-	WREG32_RLC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+	WREG32_XCC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
 			regTCP_WATCH0_ADDR_H) +
 			(watch_id * TCP_WATCH_STRIDE)),
-			watch_address_high);
+			watch_address_high, inst);

-	WREG32_RLC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+	WREG32_XCC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
 			regTCP_WATCH0_ADDR_L) +
 			(watch_id * TCP_WATCH_STRIDE)),
-			watch_address_low);
+			watch_address_low, inst);

 	return watch_address_cntl;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
index d67d003ba..b61a32d6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
@@ -658,7 +658,7 @@ static int kgd_gfx_v11_validate_trap_override_request(struct amdgpu_device *adev
 				KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
 				KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION;

-	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 4))
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 4))
 		*trap_mask_supported |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START |
 					KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;
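The kgd_gfx conversions above move HQD programming off WREG32_RLC(), which implicitly targeted instance 0, onto the per-XCC accessors introduced in amdgpu.h earlier in this patch, so every register write is steered to the XCC instance that owns the queue. A minimal sketch of the pattern; set_hqd_active() is a hypothetical helper, not driver code:

    /* RREG32_XCC/WREG32_XCC wrap amdgpu_device_xcc_rreg/_wreg and carry
     * the target XCC instance explicitly.
     */
    static void set_hqd_active(struct amdgpu_device *adev, uint32_t reg, int inst)
    {
    	uint32_t data = RREG32_XCC(reg, inst);	/* read on XCC "inst" */

    	data = REG_SET_FIELD(data, CP_HQD_ACTIVE, ACTIVE, 1);
    	WREG32_XCC(reg, data, inst);		/* write back on the same XCC */
    }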
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 3c45a188b..00fbc0f44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -91,8 +91,8 @@ void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi
 {
 	kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst);

-	WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmSH_MEM_CONFIG), sh_mem_config);
-	WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmSH_MEM_BASES), sh_mem_bases);
+	WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmSH_MEM_CONFIG, sh_mem_config);
+	WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmSH_MEM_BASES, sh_mem_bases);
 	/* APE1 no longer exists on GFX9 */

 	kgd_gfx_v9_unlock_srbm(adev, inst);
@@ -239,14 +239,13 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,

 	for (reg = hqd_base;
 	     reg <= SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI);
 	     reg++)
-		WREG32_RLC(reg, mqd_hqd[reg - hqd_base]);
+		WREG32_XCC(reg, mqd_hqd[reg - hqd_base], inst);

 	/* Activate doorbell logic before triggering WPTR poll. */
 	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
 			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
-	WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL),
-		   data);
+	WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL, data);

 	if (wptr) {
 		/* Don't read wptr with get_user because the user
@@ -275,25 +274,24 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,
 		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
 		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;

-		WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_LO),
-		       lower_32_bits(guessed_wptr));
-		WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI),
-		       upper_32_bits(guessed_wptr));
-		WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR),
-		       lower_32_bits((uintptr_t)wptr));
-		WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
-		       upper_32_bits((uintptr_t)wptr));
-		WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_PQ_WPTR_POLL_CNTL1,
-		       (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id));
+		WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_LO,
+				 lower_32_bits(guessed_wptr));
+		WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI,
+				 upper_32_bits(guessed_wptr));
+		WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR,
+				 lower_32_bits((uintptr_t)wptr));
+		WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
+				 upper_32_bits((uintptr_t)wptr));
+		WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_PQ_WPTR_POLL_CNTL1,
+				 (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id));
 	}

 	/* Start the EOP fetcher */
-	WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_EOP_RPTR),
-	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
-			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
+	WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_EOP_RPTR,
+			 REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1));

 	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
-	WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE), data);
+	WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE, data);

 	kgd_gfx_v9_release_queue(adev, inst);

@@ -556,7 +554,7 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
 		break;
 	}

-	WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_DEQUEUE_REQUEST), type);
+	WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_DEQUEUE_REQUEST, type);
 	end_jiffies = (utimeout * HZ / 1000) + jiffies;

 	while (true) {
@@ -677,7 +675,7 @@ void kgd_gfx_v9_set_wave_launch_stall(struct amdgpu_device *adev,
 	int i;
 	uint32_t data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));

-	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1))
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1))
 		data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_VMID,
 							stall ? 1 << vmid : 0);
 	else
@@ -908,8 +906,8 @@ void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev,
 				  uint32_t inst)
 {
-	*wait_times = RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
-			mmCP_IQ_WAIT_TIME2));
+	*wait_times = RREG32_SOC15_RLC(GC, GET_INST(GC, inst),
+				       mmCP_IQ_WAIT_TIME2);
 }

 void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
@@ -1037,7 +1035,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
 	int pasid_tmp;
 	int max_queue_cnt;
 	int vmid_wave_cnt = 0;
-	DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
+	DECLARE_BITMAP(cp_queue_bitmap, AMDGPU_MAX_QUEUES);

 	lock_spi_csq_mutexes(adev);
 	soc15_grbm_select(adev, 1, 0, 0, 0, inst);
@@ -1047,7 +1045,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
 	 * to get number of waves in flight
 	 */
 	bitmap_complement(cp_queue_bitmap, adev->gfx.mec_bitmap[0].queue_bitmap,
-			  KGD_MAX_QUEUES);
+			  AMDGPU_MAX_QUEUES);
 	max_queue_cnt = adev->gfx.mec.num_pipe_per_mec *
 			adev->gfx.mec.num_queue_per_pipe;
 	sh_cnt = adev->gfx.config.max_sh_per_se;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index e03601113..41fbc4fd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -44,6 +44,7 @@
  * changes to accumulate
  */
 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
+#define AMDGPU_RESERVE_MEM_LIMIT	(3UL << 29)

 /*
  * Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
@@ -117,11 +118,16 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
 		return;

 	si_meminfo(&si);
-	mem = si.freeram - si.freehigh;
+	mem = si.totalram - si.totalhigh;
 	mem *= si.mem_unit;

 	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
-	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
+	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 6);
+	if (kfd_mem_limit.max_system_mem_limit < 2 * AMDGPU_RESERVE_MEM_LIMIT)
+		kfd_mem_limit.max_system_mem_limit >>= 1;
+	else
+		kfd_mem_limit.max_system_mem_limit -= AMDGPU_RESERVE_MEM_LIMIT;
+
 	kfd_mem_limit.max_ttm_mem_limit = ttm_tt_pages_limit() << PAGE_SHIFT;
 	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
 		(kfd_mem_limit.max_system_mem_limit >> 20),
@@ -419,6 +425,32 @@ validate_fail:
 	return ret;
 }

+static int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
+					       uint32_t domain,
+					       struct dma_fence *fence)
+{
+	int ret = amdgpu_bo_reserve(bo, false);
+
+	if (ret)
+		return ret;
+
+	ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
+	if (ret)
+		goto unreserve_out;
+
+	ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
+	if (ret)
+		goto unreserve_out;
+
+	dma_resv_add_fence(bo->tbo.base.resv, fence,
+			   DMA_RESV_USAGE_BOOKKEEP);
+
+unreserve_out:
+	amdgpu_bo_unreserve(bo);
+
+	return ret;
+}
+
 static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
 {
 	return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
 }
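The new system memory limit in amdgpu_amdkfd_gpuvm_init_mem_limits() above starts from total RAM rather than momentarily-free RAM, keeps only 1/64 (instead of 1/16) as headroom, and then holds back a fixed AMDGPU_RESERVE_MEM_LIMIT of 3UL << 29 = 1.5 GiB, halving the limit instead on machines too small for that reserve. Worked numbers for an illustrative 64 GiB system:

    /* mem = si.totalram * si.mem_unit          = 64 GiB
     * lim = mem - (mem >> 6)                   = 64 GiB - 1 GiB = 63 GiB
     * 2 * AMDGPU_RESERVE_MEM_LIMIT             = 3 GiB; 63 GiB >= 3 GiB,
     * so: lim -= 1.5 GiB                      -> 61.5 GiB
     *
     * On a box where lim < 3 GiB the limit is halved instead, so very
     * small systems still keep memory back from KFD allocations.
     */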
@@ -733,7 +765,7 @@ kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
 	enum dma_data_direction dir;

 	if (unlikely(!ttm->sg)) {
-		pr_err("SG Table of BO is UNEXPECTEDLY NULL");
+		pr_debug("SG Table of BO is NULL");
 		return;
 	}

@@ -828,6 +860,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
 	uint64_t va = mem->va;
 	struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
 	struct amdgpu_bo *bo[2] = {NULL, NULL};
+	struct amdgpu_bo_va *bo_va;
 	bool same_hive = false;
 	int i, ret;

@@ -866,9 +899,10 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
 		if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) ||
 		    (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) ||
-		    same_hive) {
+		    (mem->domain == AMDGPU_GEM_DOMAIN_GTT && reuse_dmamap(adev, bo_adev)) ||
+		    same_hive) {
 			/* Mappings on the local GPU, or VRAM mappings in the
-			 * local hive, or userptr mapping can reuse dma map
+			 * local hive, or userptr, or GTT mapping can reuse dma map
 			 * address space share the original BO
 			 */
 			attachment[i]->type = KFD_MEM_ATT_SHARED;
@@ -914,7 +948,12 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
 			pr_debug("Unable to reserve BO during memory attach");
 			goto unwind;
 		}
-		attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
+		bo_va = amdgpu_vm_bo_find(vm, bo[i]);
+		if (!bo_va)
+			bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
+		else
+			++bo_va->ref_count;
+		attachment[i]->bo_va = bo_va;
 		amdgpu_bo_unreserve(bo[i]);
 		if (unlikely(!attachment[i]->bo_va)) {
 			ret = -ENOMEM;
@@ -938,7 +977,8 @@ unwind:
 			continue;
 		if (attachment[i]->bo_va) {
 			amdgpu_bo_reserve(bo[i], true);
-			amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
+			if (--attachment[i]->bo_va->ref_count == 0)
+				amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
 			amdgpu_bo_unreserve(bo[i]);
 			list_del(&attachment[i]->list);
 		}
@@ -955,7 +995,8 @@ static void kfd_mem_detach(struct kfd_mem_attachment *attachment)

 	pr_debug("\t remove VA 0x%llx in entry %p\n",
 			attachment->va, attachment);
-	amdgpu_vm_bo_del(attachment->adev, attachment->bo_va);
+	if (--attachment->bo_va->ref_count == 0)
+		amdgpu_vm_bo_del(attachment->adev, attachment->bo_va);
 	drm_gem_object_put(&bo->tbo.base);
 	list_del(&attachment->list);
 	kfree(attachment);
@@ -1201,8 +1242,6 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
 	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

 	amdgpu_sync_fence(sync, bo_va->last_pt_update);
-
-	kfd_mem_dmaunmap_attachment(mem, entry);
 }

 static int update_gpuvm_pte(struct kgd_mem *mem,
@@ -1257,6 +1296,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,

 update_gpuvm_pte_failed:
 	unmap_bo_from_gpuvm(mem, entry, sync);
+	kfd_mem_dmaunmap_attachment(mem, entry);
 	return ret;
 }

@@ -1690,6 +1730,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	}
 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT)
 		alloc_flags |= AMDGPU_GEM_CREATE_COHERENT;
+	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT)
+		alloc_flags |= AMDGPU_GEM_CREATE_EXT_COHERENT;
 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED)
 		alloc_flags |= AMDGPU_GEM_CREATE_UNCACHED;

@@ -1768,6 +1810,15 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 		}
 		bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
 		bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
+	} else {
+		mutex_lock(&avm->process_info->lock);
+		if (avm->process_info->eviction_fence &&
+		    !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
+			ret = amdgpu_amdkfd_bo_validate_and_fence(bo, domain,
+				&avm->process_info->eviction_fence->base);
+		mutex_unlock(&avm->process_info->lock);
+		if (ret)
+			goto err_validate_bo;
 	}

 	if (offset)
@@ -1777,6 +1828,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(

 allocate_init_user_pages_failed:
 err_pin_bo:
+err_validate_bo:
 	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
 	drm_vma_node_revoke(&gobj->vma_node, drm_priv);
 err_node_allow:
@@ -1850,18 +1902,16 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	if (unlikely(ret))
 		return ret;

-	/* The eviction fence should be removed by the last unmap.
-	 * TODO: Log an error condition if the bo still has the eviction fence
-	 * attached
-	 */
 	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
 					process_info->eviction_fence);
 	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
 		mem->va + bo_size * (1 + mem->aql_queue));

 	/* Remove from VM internal data structures */
-	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
+	list_for_each_entry_safe(entry, tmp, &mem->attachments, list) {
+		kfd_mem_dmaunmap_attachment(mem, entry);
 		kfd_mem_detach(entry);
+	}

 	ret = unreserve_bo_and_vms(&ctx, false, false);

@@ -1980,19 +2030,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	if (unlikely(ret))
 		goto out_unreserve;

-	if (mem->mapped_to_gpu_memory == 0 &&
-	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
-		/* Validate BO only once. The eviction fence gets added to BO
-		 * the first time it is mapped. Validate will wait for all
-		 * background evictions to complete.
-		 */
-		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
-		if (ret) {
-			pr_debug("Validate failed\n");
-			goto out_unreserve;
-		}
-	}
-
 	list_for_each_entry(entry, &mem->attachments, list) {
 		if (entry->bo_va->base.vm != avm || entry->is_mapped)
 			continue;
@@ -2019,10 +2056,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 			 mem->mapped_to_gpu_memory);
 	}

-	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
-		dma_resv_add_fence(bo->tbo.base.resv,
-				   &avm->process_info->eviction_fence->base,
-				   DMA_RESV_USAGE_BOOKKEEP);
 	ret = unreserve_bo_and_vms(&ctx, false, false);

 	goto out;
@@ -2035,11 +2068,27 @@ out:
 	return ret;
 }

+void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
+{
+	struct kfd_mem_attachment *entry;
+	struct amdgpu_vm *vm;
+
+	vm = drm_priv_to_vm(drm_priv);
+
+	mutex_lock(&mem->lock);
+
+	list_for_each_entry(entry, &mem->attachments, list) {
+		if (entry->bo_va->base.vm == vm)
+			kfd_mem_dmaunmap_attachment(mem, entry);
+	}
+
+	mutex_unlock(&mem->lock);
+}
+
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
 {
 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
-	struct amdkfd_process_info *process_info = avm->process_info;
 	unsigned long bo_size = mem->bo->tbo.base.size;
 	struct kfd_mem_attachment *entry;
 	struct bo_vm_reservation_context ctx;
@@ -2080,15 +2129,6 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 			 mem->mapped_to_gpu_memory);
 	}

-	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
-	 * required.
- */ - if (mem->mapped_to_gpu_memory == 0 && - !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && - !mem->bo->tbo.pin_count) - amdgpu_amdkfd_remove_eviction_fence(mem->bo, - process_info->eviction_fence); - unreserve_out: unreserve_bo_and_vms(&ctx, false, false); out: @@ -2316,8 +2356,20 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, amdgpu_sync_create(&(*mem)->sync); (*mem)->is_imported = true; + mutex_lock(&avm->process_info->lock); + if (avm->process_info->eviction_fence && + !dma_fence_is_signaled(&avm->process_info->eviction_fence->base)) + ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain, + &avm->process_info->eviction_fence->base); + mutex_unlock(&avm->process_info->lock); + if (ret) + goto err_remove_mem; + return 0; +err_remove_mem: + remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); + drm_vma_node_revoke(&obj->vma_node, drm_priv); err_free_mem: kfree(*mem); err_put_obj: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index f3a09ecb7..618e469e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -292,7 +292,11 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) if (dev_is_removable(&adev->pdev->dev)) return false; - while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { + while ((pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev))) { + if ((pdev->class != PCI_CLASS_DISPLAY_VGA << 8) && + (pdev->class != PCI_CLASS_DISPLAY_OTHER << 8)) + continue; + dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) continue; @@ -304,20 +308,6 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) } } - if (!found) { - while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { - dhandle = ACPI_HANDLE(&pdev->dev); - if (!dhandle) - continue; - - status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); - if (ACPI_SUCCESS(status)) { - found = true; - break; - } - } - } - if (!found) return false; pci_dev_put(pdev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 9a53ca555..702f6610d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -75,27 +75,17 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, struct amdgpu_bo_list_entry *array; struct amdgpu_bo_list *list; uint64_t total_size = 0; - size_t size; unsigned i; int r; - if (num_entries > (SIZE_MAX - sizeof(struct amdgpu_bo_list)) - / sizeof(struct amdgpu_bo_list_entry)) - return -EINVAL; - - size = sizeof(struct amdgpu_bo_list); - size += num_entries * sizeof(struct amdgpu_bo_list_entry); - list = kvmalloc(size, GFP_KERNEL); + list = kvzalloc(struct_size(list, entries, num_entries), GFP_KERNEL); if (!list) return -ENOMEM; kref_init(&list->refcount); - list->gds_obj = NULL; - list->gws_obj = NULL; - list->oa_obj = NULL; - array = amdgpu_bo_list_array_entry(list, 0); - memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry)); + list->num_entries = num_entries; + array = list->entries; for (i = 0; i < num_entries; ++i) { struct amdgpu_bo_list_entry *entry; @@ -140,7 +130,6 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, } list->first_userptr = first_userptr; - list->num_entries = num_entries; sort(array, last_entry, sizeof(struct amdgpu_bo_list_entry), amdgpu_bo_list_entry_cmp, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h index 26c01cb13..555cd6d87 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h @@ -55,6 +55,8 @@ struct amdgpu_bo_list { /* Protect access during command submission. */ struct mutex bo_list_mutex; + + struct amdgpu_bo_list_entry entries[] __counted_by(num_entries); }; int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id, @@ -69,22 +71,14 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, size_t num_entries, struct amdgpu_bo_list **list); -static inline struct amdgpu_bo_list_entry * -amdgpu_bo_list_array_entry(struct amdgpu_bo_list *list, unsigned index) -{ - struct amdgpu_bo_list_entry *array = (void *)&list[1]; - - return &array[index]; -} - #define amdgpu_bo_list_for_each_entry(e, list) \ - for (e = amdgpu_bo_list_array_entry(list, 0); \ - e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \ + for (e = list->entries; \ + e != &list->entries[list->num_entries]; \ ++e) #define amdgpu_bo_list_for_each_userptr_entry(e, list) \ - for (e = amdgpu_bo_list_array_entry(list, (list)->first_userptr); \ - e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \ + for (e = &list->entries[list->first_userptr]; \ + e != &list->entries[list->num_entries]; \ ++e) #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index d34037b85..7473a42f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -264,16 +264,9 @@ struct edid *amdgpu_connector_edid(struct drm_connector *connector) static struct edid * amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev) { - struct edid *edid; - if (adev->mode_info.bios_hardcoded_edid) { - edid = kmalloc(adev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL); - if (edid) { - memcpy((unsigned char *)edid, - (unsigned char *)adev->mode_info.bios_hardcoded_edid, - adev->mode_info.bios_hardcoded_edid_size); - return edid; - } + return kmemdup((unsigned char *)adev->mode_info.bios_hardcoded_edid, + adev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL); } return NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index c0a3afe81..e50be6500 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1117,6 +1117,11 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) return r; } + /* FIXME: In theory this loop shouldn't be needed any more when + * amdgpu_vm_handle_moved handles all moved BOs that are reserved + * with p->ticket. But removing it caused test regressions, so I'm + * leaving it here for now. 
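The amdgpu_bo_list conversion above replaces pointer arithmetic past the end of the struct with a C99 flexible array member, annotated __counted_by(num_entries) so the compiler and runtime checkers can bounds-check accesses, and sizes the allocation with struct_size(), which saturates on overflow instead of wrapping; that is why the hand-rolled SIZE_MAX check could be dropped. A standalone sketch of the same layout, with the overflow check written out since struct_size() and __counted_by are kernel helpers:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
        uint64_t bo_handle;
    };

    struct bo_list {
        unsigned int num_entries;
        struct entry entries[];   /* flexible array, counted by num_entries */
    };

    static struct bo_list *bo_list_create(unsigned int num_entries)
    {
        size_t size;
        struct bo_list *list;

        /* Equivalent of struct_size(list, entries, num_entries): reject
         * counts that would overflow the allocation size. */
        if (num_entries > (SIZE_MAX - sizeof(*list)) / sizeof(struct entry))
            return NULL;
        size = sizeof(*list) + (size_t)num_entries * sizeof(struct entry);

        list = calloc(1, size);   /* zeroed, like kvzalloc() */
        if (!list)
            return NULL;
        list->num_entries = num_entries;
        return list;
    }

    int main(void)
    {
        struct bo_list *list = bo_list_create(4);

        if (!list)
            return 1;
        for (unsigned int i = 0; i < list->num_entries; ++i)
            list->entries[i].bo_handle = 100 + i;  /* no cast or offset math */
        printf("last handle: %llu\n",
               (unsigned long long)list->entries[list->num_entries - 1].bo_handle);
        free(list);
        return 0;
    }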
+ */ amdgpu_bo_list_for_each_entry(e, p->bo_list) { bo_va = e->bo_va; if (bo_va == NULL) @@ -1131,7 +1136,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) return r; } - r = amdgpu_vm_handle_moved(adev, vm); + r = amdgpu_vm_handle_moved(adev, vm, &p->exec.ticket); if (r) return r; @@ -1152,7 +1157,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo); } - if (amdgpu_vm_debug) { + if (adev->debug_vm) { /* Invalidate all BOs to test for userspace bugs */ amdgpu_bo_list_for_each_entry(e, p->bo_list) { struct amdgpu_bo *bo = e->bo; @@ -1393,8 +1398,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) r = amdgpu_cs_parser_init(&parser, adev, filp, data); if (r) { - if (printk_ratelimit()) - DRM_ERROR("Failed to initialize parser %d!\n", r); + DRM_ERROR_RATELIMITED("Failed to initialize parser %d!\n", r); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 76549c2cf..e2ae9ba14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -42,6 +42,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = { [AMDGPU_HW_IP_VCN_DEC] = 1, [AMDGPU_HW_IP_VCN_ENC] = 1, [AMDGPU_HW_IP_VCN_JPEG] = 1, + [AMDGPU_HW_IP_VPE] = 1, }; bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 418ff7cd6..8d4a3ff65 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -2028,8 +2028,8 @@ static ssize_t amdgpu_reset_dump_register_list_read(struct file *f, if (ret) return ret; - for (i = 0; i < adev->num_regs; i++) { - sprintf(reg_offset, "0x%x\n", adev->reset_dump_reg_list[i]); + for (i = 0; i < adev->reset_info.num_regs; i++) { + sprintf(reg_offset, "0x%x\n", adev->reset_info.reset_dump_reg_list[i]); up_read(&adev->reset_domain->sem); if (copy_to_user(buf + len, reg_offset, strlen(reg_offset))) return -EFAULT; @@ -2086,9 +2086,9 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f, if (ret) goto error_free; - swap(adev->reset_dump_reg_list, tmp); - swap(adev->reset_dump_reg_value, new); - adev->num_regs = i; + swap(adev->reset_info.reset_dump_reg_list, tmp); + swap(adev->reset_info.reset_dump_reg_value, new); + adev->reset_info.num_regs = i; up_write(&adev->reset_domain->sem); ret = size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 56d99ffbb..7f48c7ec4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -32,8 +32,6 @@ #include #include #include -#include -#include #include #include @@ -75,6 +73,7 @@ #include "amdgpu_pmu.h" #include "amdgpu_fru_eeprom.h" #include "amdgpu_reset.h" +#include "amdgpu_virt.h" #include #include @@ -163,6 +162,74 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev, static DEVICE_ATTR(pcie_replay_count, 0444, amdgpu_device_get_pcie_replay_count, NULL); +/** + * DOC: board_info + * + * The amdgpu driver provides a sysfs API for giving board related information. 
+ * It provides the form factor information in the format + * + * type : form factor + * + * Possible form factor values + * + * - "cem" - PCIE CEM card + * - "oam" - Open Compute Accelerator Module + * - "unknown" - Not known + * + */ + +static ssize_t amdgpu_device_get_board_info(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + enum amdgpu_pkg_type pkg_type = AMDGPU_PKG_TYPE_CEM; + const char *pkg; + + if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type) + pkg_type = adev->smuio.funcs->get_pkg_type(adev); + + switch (pkg_type) { + case AMDGPU_PKG_TYPE_CEM: + pkg = "cem"; + break; + case AMDGPU_PKG_TYPE_OAM: + pkg = "oam"; + break; + default: + pkg = "unknown"; + break; + } + + return sysfs_emit(buf, "%s : %s\n", "type", pkg); +} + +static DEVICE_ATTR(board_info, 0444, amdgpu_device_get_board_info, NULL); + +static struct attribute *amdgpu_board_attrs[] = { + &dev_attr_board_info.attr, + NULL, +}; + +static umode_t amdgpu_board_attrs_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (adev->flags & AMD_IS_APU) + return 0; + + return attr->mode; +} + +static const struct attribute_group amdgpu_board_attrs_group = { + .attrs = amdgpu_board_attrs, + .is_visible = amdgpu_board_attrs_is_visible +}; + static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev); @@ -406,7 +473,7 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) && down_read_trylock(&adev->reset_domain->sem)) { - ret = amdgpu_kiq_rreg(adev, reg); + ret = amdgpu_kiq_rreg(adev, reg, 0); up_read(&adev->reset_domain->sem); } else { ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); @@ -443,6 +510,49 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) BUG(); } + +/** + * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC + * + * @adev: amdgpu_device pointer + * @reg: dword aligned register offset + * @acc_flags: access flags which require special behavior + * @xcc_id: xcc accelerated compute core id + * + * Returns the 32 bit value from the offset specified. 
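With the attribute group above in place, the form factor becomes readable from the card's PCI sysfs directory on dGPUs; the is_visible callback hides the file on APUs. A short reader as a sketch; the PCI address in the path is a placeholder, not a value taken from this patch:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical PCI address; substitute the card's real one. */
        const char *path = "/sys/bus/pci/devices/0000:03:00.0/board_info";
        char line[64];
        FILE *f = fopen(path, "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        if (fgets(line, sizeof(line), f))
            printf("%s", line);   /* e.g. "type : oam" */
        fclose(f);
        return 0;
    }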
+ */ +uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev, + uint32_t reg, uint32_t acc_flags, + uint32_t xcc_id) +{ + uint32_t ret, rlcg_flag; + + if (amdgpu_device_skip_hw_access(adev)) + return 0; + + if ((reg * 4) < adev->rmmio_size) { + if (amdgpu_sriov_vf(adev) && + !amdgpu_sriov_runtime(adev) && + adev->gfx.rlc.rlcg_reg_access_supported && + amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, + GC_HWIP, false, + &rlcg_flag)) { + ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, xcc_id); + } else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && + amdgpu_sriov_runtime(adev) && + down_read_trylock(&adev->reset_domain->sem)) { + ret = amdgpu_kiq_rreg(adev, reg, xcc_id); + up_read(&adev->reset_domain->sem); + } else { + ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); + } + } else { + ret = adev->pcie_rreg(adev, reg * 4); + } + + return ret; +} + /* * MMIO register write with bytes helper functions * @offset:bytes offset from MMIO start @@ -490,7 +600,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev, if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) && down_read_trylock(&adev->reset_domain->sem)) { - amdgpu_kiq_wreg(adev, reg, v); + amdgpu_kiq_wreg(adev, reg, v, 0); up_read(&adev->reset_domain->sem); } else { writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); @@ -508,6 +618,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev, * @adev: amdgpu_device pointer * @reg: mmio/rlc register * @v: value to write + * @xcc_id: xcc accelerated compute core id * * this function is invoked only for the debugfs register access */ @@ -530,6 +641,47 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, } } +/** + * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC + * + * @adev: amdgpu_device pointer + * @reg: dword aligned register offset + * @v: 32 bit value to write to the register + * @acc_flags: access flags which require special behavior + * @xcc_id: xcc accelerated compute core id + * + * Writes the value specified to the offset specified. 
+ */ +void amdgpu_device_xcc_wreg(struct amdgpu_device *adev, + uint32_t reg, uint32_t v, + uint32_t acc_flags, uint32_t xcc_id) +{ + uint32_t rlcg_flag; + + if (amdgpu_device_skip_hw_access(adev)) + return; + + if ((reg * 4) < adev->rmmio_size) { + if (amdgpu_sriov_vf(adev) && + !amdgpu_sriov_runtime(adev) && + adev->gfx.rlc.rlcg_reg_access_supported && + amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, + GC_HWIP, true, + &rlcg_flag)) { + amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, xcc_id); + } else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && + amdgpu_sriov_runtime(adev) && + down_read_trylock(&adev->reset_domain->sem)) { + amdgpu_kiq_wreg(adev, reg, v, xcc_id); + up_read(&adev->reset_domain->sem); + } else { + writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); + } + } else { + adev->pcie_wreg(adev, reg * 4, v); + } +} + /** * amdgpu_device_indirect_rreg - read an indirect register * @@ -572,7 +724,7 @@ u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev, pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); - if (adev->nbio.funcs->get_pcie_index_hi_offset) + if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset)) pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); else pcie_index_hi = 0; @@ -639,6 +791,56 @@ u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev, return r; } +u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev, + u64 reg_addr) +{ + unsigned long flags, pcie_index, pcie_data; + unsigned long pcie_index_hi = 0; + void __iomem *pcie_index_offset; + void __iomem *pcie_index_hi_offset; + void __iomem *pcie_data_offset; + u64 r; + + pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); + pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); + if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset)) + pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; + pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; + if (pcie_index_hi != 0) + pcie_index_hi_offset = (void __iomem *)adev->rmmio + + pcie_index_hi * 4; + + /* read low 32 bits */ + writel(reg_addr, pcie_index_offset); + readl(pcie_index_offset); + if (pcie_index_hi != 0) { + writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset); + readl(pcie_index_hi_offset); + } + r = readl(pcie_data_offset); + /* read high 32 bits */ + writel(reg_addr + 4, pcie_index_offset); + readl(pcie_index_offset); + if (pcie_index_hi != 0) { + writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset); + readl(pcie_index_hi_offset); + } + r |= ((u64)readl(pcie_data_offset) << 32); + + /* clear the high bits */ + if (pcie_index_hi != 0) { + writel(0, pcie_index_hi_offset); + readl(pcie_index_hi_offset); + } + + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); + + return r; +} + /** * amdgpu_device_indirect_wreg - write an indirect register address * @@ -678,7 +880,7 @@ void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev, pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); - if (adev->nbio.funcs->get_pcie_index_hi_offset) + if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset)) pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); else pcie_index_hi = 0; @@ -743,6 +945,55 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev, 
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); } +void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev, + u64 reg_addr, u64 reg_data) +{ + unsigned long flags, pcie_index, pcie_data; + unsigned long pcie_index_hi = 0; + void __iomem *pcie_index_offset; + void __iomem *pcie_index_hi_offset; + void __iomem *pcie_data_offset; + + pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); + pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); + if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset)) + pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; + pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; + if (pcie_index_hi != 0) + pcie_index_hi_offset = (void __iomem *)adev->rmmio + + pcie_index_hi * 4; + + /* write low 32 bits */ + writel(reg_addr, pcie_index_offset); + readl(pcie_index_offset); + if (pcie_index_hi != 0) { + writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset); + readl(pcie_index_hi_offset); + } + writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset); + readl(pcie_data_offset); + /* write high 32 bits */ + writel(reg_addr + 4, pcie_index_offset); + readl(pcie_index_offset); + if (pcie_index_hi != 0) { + writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset); + readl(pcie_index_hi_offset); + } + writel((u32)(reg_data >> 32), pcie_data_offset); + readl(pcie_data_offset); + + /* clear the high bits */ + if (pcie_index_hi != 0) { + writel(0, pcie_index_hi_offset); + readl(pcie_index_hi_offset); + } + + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); +} + /** * amdgpu_device_get_rev_id - query device rev_id * @@ -820,6 +1071,13 @@ static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg) return 0; } +static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg) +{ + DRM_ERROR("Invalid callback to read register 0x%llX\n", reg); + BUG(); + return 0; +} + /** * amdgpu_invalid_wreg64 - dummy reg write function * @@ -837,6 +1095,13 @@ static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint BUG(); } +static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v) +{ + DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n", + reg, v); + BUG(); +} + /** * amdgpu_block_invalid_rreg - dummy reg read function * @@ -890,10 +1155,12 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev) amdgpu_asic_pre_asic_init(adev); - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) || - adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) { + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) || + amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) { amdgpu_psp_wait_for_bootloader(adev); ret = amdgpu_atomfirmware_asic_init(adev, true); + /* TODO: check the return val and stop device initialization if boot fails */ + amdgpu_psp_query_boot_status(adev); return ret; } else { return amdgpu_atom_asic_init(adev->mode_info.atom_context); @@ -1218,6 +1485,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev) return true; fw_ver = *((uint32_t *)adev->pm.fw->data + 69); + release_firmware(adev->pm.fw); if (fw_ver < 0x00160e00) return true; } @@ -1246,14 +1514,45 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev) } /* - * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic - * speed switching. 
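The *_ext accessors above extend the classic index/data indirect-register protocol: an address is programmed through an index register (plus an optional high-bits index register for addresses beyond 32 bits), data moves through a data register, and 64-bit values transfer as two dword halves at consecutive offsets. The real code also holds pcie_idx_lock and issues a read-back after each write to flush posted writes; this standalone simulation against an array keeps only the addressing logic:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regs[16];     /* fake register file */
    static uint32_t index_reg;    /* currently selected address */

    static void write_index(uint32_t addr) { index_reg = addr; }
    static void write_data(uint32_t v)     { regs[index_reg / 4] = v; }
    static uint32_t read_data(void)        { return regs[index_reg / 4]; }

    static void indirect_wreg64(uint32_t addr, uint64_t v)
    {
        /* low 32 bits */
        write_index(addr);
        write_data((uint32_t)(v & 0xffffffffu));
        /* high 32 bits live at the next dword */
        write_index(addr + 4);
        write_data((uint32_t)(v >> 32));
    }

    static uint64_t indirect_rreg64(uint32_t addr)
    {
        uint64_t r;

        write_index(addr);
        r = read_data();
        write_index(addr + 4);
        r |= (uint64_t)read_data() << 32;
        return r;
    }

    int main(void)
    {
        indirect_wreg64(8, 0x1122334455667788ull);
        printf("0x%llx\n", (unsigned long long)indirect_rreg64(8));
        return 0;
    }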
Until we have confirmation from Intel that a specific host - * supports it, it's safer that we keep it disabled for all. + * Check whether seamless boot is supported. + * + * So far we only support seamless boot on DCE 3.0 or later. + * If users report that it works on older ASICS as well, we may + * loosen this. + */ +bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev) +{ + switch (amdgpu_seamless) { + case -1: + break; + case 1: + return true; + case 0: + return false; + default: + DRM_ERROR("Invalid value for amdgpu.seamless: %d\n", + amdgpu_seamless); + return false; + } + + if (!(adev->flags & AMD_IS_APU)) + return false; + + if (adev->mman.keep_stolen_vga_memory) + return false; + + return adev->ip_versions[DCE_HWIP][0] >= IP_VERSION(3, 0, 0); +} + +/* + * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids + * don't support dynamic speed switching. Until we have confirmation from Intel + * that a specific host supports it, it's safer that we keep it disabled for all. * * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/ * https://gitlab.freedesktop.org/drm/amd/-/issues/2663 */ -bool amdgpu_device_pcie_dynamic_switching_supported(void) +static bool amdgpu_device_pcie_dynamic_switching_supported(void) { #if IS_ENABLED(CONFIG_X86) struct cpuinfo_x86 *c = &cpu_data(0); @@ -1286,20 +1585,13 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev) default: return false; } + if (adev->flags & AMD_IS_APU) + return false; + if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) + return false; return pcie_aspm_enabled(adev->pdev); } -bool amdgpu_device_aspm_support_quirk(void) -{ -#if IS_ENABLED(CONFIG_X86) - struct cpuinfo_x86 *c = &cpu_data(0); - - return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE); -#else - return true; -#endif -} - /* if we get transitioned to only one device, take VGA back */ /** * amdgpu_device_vga_set_decode - enable/disable vga decode @@ -1548,6 +1840,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, } else { pr_info("switched off\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + amdgpu_device_prepare(dev); amdgpu_device_suspend(dev, true); amdgpu_device_cache_pci_state(pdev); /* Shut down the device */ @@ -2275,6 +2568,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) } r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, + DRM_SCHED_PRIORITY_COUNT, ring->num_hw_submission, 0, timeout, adev->reset_domain->wq, ring->sched_score, ring->name, @@ -2284,6 +2578,18 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) ring->name); return r; } + r = amdgpu_uvd_entity_init(adev, ring); + if (r) { + DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n", + ring->name); + return r; + } + r = amdgpu_vce_entity_init(adev, ring); + if (r) { + DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n", + ring->name); + return r; + } } amdgpu_xcp_update_partition_sched_list(adev); @@ -2444,6 +2750,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) if (r) goto init_failed; + if (adev->mman.buffer_funcs_ring->sched.ready) + amdgpu_ttm_set_buffer_funcs_status(adev, true); + /* Don't init kfd if whole hive need to be reset during init */ if (!adev->gmc.xgmi.pending_reset) { kgd2kfd_init_zone_device(adev); @@ -2726,7 +3035,7 @@ static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev) { int i, r; - if 
(adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0)) return; for (i = 0; i < adev->num_ip_blocks; i++) { @@ -2979,8 +3288,10 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */ if (adev->in_s0ix && - (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) && - (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) + (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >= + IP_VERSION(5, 0, 0)) && + (adev->ip_blocks[i].version->type == + AMD_IP_BLOCK_TYPE_SDMA)) continue; /* Once swPSP provides the IMU, RLC FW binaries to TOS during cold-boot. @@ -3039,6 +3350,8 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev) amdgpu_virt_request_full_gpu(adev, false); } + amdgpu_ttm_set_buffer_funcs_status(adev, false); + r = amdgpu_device_ip_suspend_phase1(adev); if (r) return r; @@ -3228,6 +3541,9 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev) r = amdgpu_device_ip_resume_phase2(adev); + if (adev->mman.buffer_funcs_ring->sched.ready) + amdgpu_ttm_set_buffer_funcs_status(adev, true); + return r; } @@ -3357,9 +3673,7 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) if (adev->asic_reset_res) goto fail; - if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops && - adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count) - adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev); + amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB); } else { task_barrier_full(&hive->tb); @@ -3533,6 +3847,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->pciep_wreg = &amdgpu_invalid_wreg; adev->pcie_rreg64 = &amdgpu_invalid_rreg64; adev->pcie_wreg64 = &amdgpu_invalid_wreg64; + adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext; + adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext; adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; adev->didt_rreg = &amdgpu_invalid_rreg; @@ -3588,6 +3904,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, INIT_LIST_HEAD(&adev->ras_list); + INIT_LIST_HEAD(&adev->pm.od_kobj_list); + INIT_DELAYED_WORK(&adev->delayed_init_work, amdgpu_device_delayed_init_work_handler); INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, @@ -3684,7 +4002,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, * internal path natively support atomics, set have_atomics_support to true. */ } else if ((adev->flags & AMD_IS_APU) && - (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))) { + (amdgpu_ip_version(adev, GC_HWIP, 0) > + IP_VERSION(9, 0, 0))) { adev->have_atomics_support = true; } else { adev->have_atomics_support = @@ -3824,22 +4143,6 @@ fence_driver_init: /* Get a log2 for easy divisions. */ adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); - r = amdgpu_atombios_sysfs_init(adev); - if (r) - drm_err(&adev->ddev, - "registering atombios sysfs failed (%d).\n", r); - - r = amdgpu_pm_sysfs_init(adev); - if (r) - DRM_ERROR("registering pm sysfs failed (%d).\n", r); - - r = amdgpu_ucode_sysfs_init(adev); - if (r) { - adev->ucode_sysfs_en = false; - DRM_ERROR("Creating firmware sysfs failed (%d).\n", r); - } else - adev->ucode_sysfs_en = true; - /* * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost. * Otherwise the mgpu fan boost feature will be skipped due to the @@ -3868,10 +4171,36 @@ fence_driver_init: flush_delayed_work(&adev->delayed_init_work); } + /* + * Place those sysfs registering after `late_init`. 
As some of those + * operations performed in `late_init` might affect the sysfs + * interfaces creating. + */ + r = amdgpu_atombios_sysfs_init(adev); + if (r) + drm_err(&adev->ddev, + "registering atombios sysfs failed (%d).\n", r); + + r = amdgpu_pm_sysfs_init(adev); + if (r) + DRM_ERROR("registering pm sysfs failed (%d).\n", r); + + r = amdgpu_ucode_sysfs_init(adev); + if (r) { + adev->ucode_sysfs_en = false; + DRM_ERROR("Creating firmware sysfs failed (%d).\n", r); + } else + adev->ucode_sysfs_en = true; + r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes); if (r) dev_err(adev->dev, "Could not create amdgpu device attr\n"); + r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group); + if (r) + dev_err(adev->dev, + "Could not create amdgpu board attributes\n"); + amdgpu_fru_sysfs_init(adev); if (IS_ENABLED(CONFIG_PERF_EVENTS)) @@ -3998,6 +4327,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) /* disable ras feature must before hw fini */ amdgpu_ras_pre_fini(adev); + amdgpu_ttm_set_buffer_funcs_status(adev, false); + amdgpu_device_ip_fini_early(adev); amdgpu_irq_fini_hw(adev); @@ -4035,6 +4366,9 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) kfree(adev->bios); adev->bios = NULL; + kfree(adev->fru_info); + adev->fru_info = NULL; + px = amdgpu_device_supports_px(adev_to_drm(adev)); if (px || (!dev_is_removable(&adev->pdev->dev) && @@ -4093,6 +4427,48 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev) /* * Suspend & resume. */ +/** + * amdgpu_device_prepare - prepare for device suspend + * + * @dev: drm dev pointer + * + * Prepare to put the hw in the suspend state (all asics). + * Returns 0 for success or an error on failure. + * Called at driver suspend. + */ +int amdgpu_device_prepare(struct drm_device *dev) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + int i, r; + + amdgpu_choose_low_power_state(adev); + + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + + /* Evict the majority of BOs before starting suspend sequence */ + r = amdgpu_device_evict_resources(adev); + if (r) + goto unprepare; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].status.valid) + continue; + if (!adev->ip_blocks[i].version->funcs->prepare_suspend) + continue; + r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev); + if (r) + goto unprepare; + } + + return 0; + +unprepare: + adev->in_s0ix = adev->in_s3 = false; + + return r; +} + /** * amdgpu_device_suspend - initiate device suspend * @@ -4113,11 +4489,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) adev->in_suspend = true; - /* Evict the majority of BOs before grabbing the full access */ - r = amdgpu_device_evict_resources(adev); - if (r) - return r; - if (amdgpu_sriov_vf(adev)) { amdgpu_virt_fini_data_exchange(adev); r = amdgpu_virt_request_full_gpu(adev, false); @@ -4132,7 +4503,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); cancel_delayed_work_sync(&adev->delayed_init_work); - flush_delayed_work(&adev->gfx.gfx_off_delay_work); amdgpu_ras_suspend(adev); @@ -4145,6 +4515,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) if (r) return r; + amdgpu_ttm_set_buffer_funcs_status(adev, false); + amdgpu_fence_driver_hw_fini(adev); amdgpu_device_ip_suspend_phase2(adev); @@ -4152,6 +4524,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) if (amdgpu_sriov_vf(adev)) amdgpu_virt_release_full_gpu(adev, 
false); + r = amdgpu_dpm_notify_rlc_state(adev, false); + if (r) + return r; + return 0; } @@ -4197,19 +4573,18 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) } amdgpu_fence_driver_hw_init(adev); - r = amdgpu_device_ip_late_init(adev); - if (r) - goto exit; - - queue_delayed_work(system_wq, &adev->delayed_init_work, - msecs_to_jiffies(AMDGPU_RESUME_MS)); - if (!adev->in_s0ix) { r = amdgpu_amdkfd_resume(adev, adev->in_runpm); if (r) goto exit; } + r = amdgpu_device_ip_late_init(adev); + if (r) + goto exit; + + queue_delayed_work(system_wq, &adev->delayed_init_work, + msecs_to_jiffies(AMDGPU_RESUME_MS)); exit: if (amdgpu_sriov_vf(adev)) { amdgpu_virt_init_data_exchange(adev); @@ -4777,67 +5152,16 @@ static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev) lockdep_assert_held(&adev->reset_domain->sem); - for (i = 0; i < adev->num_regs; i++) { - adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]); - trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], - adev->reset_dump_reg_value[i]); - } - - return 0; -} + for (i = 0; i < adev->reset_info.num_regs; i++) { + adev->reset_info.reset_dump_reg_value[i] = + RREG32(adev->reset_info.reset_dump_reg_list[i]); -#ifdef CONFIG_DEV_COREDUMP -static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset, - size_t count, void *data, size_t datalen) -{ - struct drm_printer p; - struct amdgpu_device *adev = data; - struct drm_print_iterator iter; - int i; - - iter.data = buffer; - iter.offset = 0; - iter.start = offset; - iter.remain = count; - - p = drm_coredump_printer(&iter); - - drm_printf(&p, "**** AMDGPU Device Coredump ****\n"); - drm_printf(&p, "kernel: " UTS_RELEASE "\n"); - drm_printf(&p, "module: " KBUILD_MODNAME "\n"); - drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec); - if (adev->reset_task_info.pid) - drm_printf(&p, "process_name: %s PID: %d\n", - adev->reset_task_info.process_name, - adev->reset_task_info.pid); - - if (adev->reset_vram_lost) - drm_printf(&p, "VRAM is lost due to GPU reset!\n"); - if (adev->num_regs) { - drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n"); - - for (i = 0; i < adev->num_regs; i++) - drm_printf(&p, "0x%08x: 0x%08x\n", - adev->reset_dump_reg_list[i], - adev->reset_dump_reg_value[i]); + trace_amdgpu_reset_reg_dumps(adev->reset_info.reset_dump_reg_list[i], + adev->reset_info.reset_dump_reg_value[i]); } - return count - iter.remain; -} - -static void amdgpu_devcoredump_free(void *data) -{ -} - -static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev) -{ - struct drm_device *dev = adev_to_drm(adev); - - ktime_get_ts64(&adev->reset_time); - dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_NOWAIT, - amdgpu_devcoredump_read, amdgpu_devcoredump_free); + return 0; } -#endif int amdgpu_do_asic_reset(struct list_head *device_list_handle, struct amdgpu_reset_context *reset_context) @@ -4845,7 +5169,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, struct amdgpu_device *tmp_adev = NULL; bool need_full_reset, skip_hw_reset, vram_lost = false; int r = 0; - bool gpu_reset_for_dev_remove = 0; /* Try reset handler method first */ tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, @@ -4865,10 +5188,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags); - gpu_reset_for_dev_remove = - test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) && - 
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); - /* * ASIC reset has to be done on all XGMI hive nodes ASAP * to allow proper links negotiation in FW (within 1 sec) @@ -4886,7 +5205,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, if (r) { dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s", r, adev_to_drm(tmp_adev)->unique); - break; + goto out; } } @@ -4905,26 +5224,12 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, if (!r && amdgpu_ras_intr_triggered()) { list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops && - tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count) - tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev); + amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB); } amdgpu_ras_intr_cleared(); } - /* Since the mode1 reset affects base ip blocks, the - * phase1 ip blocks need to be resumed. Otherwise there - * will be a BIOS signature error and the psp bootloader - * can't load kdb on the next amdgpu install. - */ - if (gpu_reset_for_dev_remove) { - list_for_each_entry(tmp_adev, device_list_handle, reset_list) - amdgpu_device_ip_resume_phase1(tmp_adev); - - goto end; - } - list_for_each_entry(tmp_adev, device_list_handle, reset_list) { if (need_full_reset) { /* post card */ @@ -4939,15 +5244,9 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, goto out; vram_lost = amdgpu_device_check_vram_lost(tmp_adev); -#ifdef CONFIG_DEV_COREDUMP - tmp_adev->reset_vram_lost = vram_lost; - memset(&tmp_adev->reset_task_info, 0, - sizeof(tmp_adev->reset_task_info)); - if (reset_context->job && reset_context->job->vm) - tmp_adev->reset_task_info = - reset_context->job->vm->task_info; - amdgpu_reset_capture_coredumpm(tmp_adev); -#endif + + amdgpu_coredump(tmp_adev, vram_lost, reset_context); + if (vram_lost) { DRM_INFO("VRAM is lost due to GPU reset!\n"); amdgpu_inc_vram_lost(tmp_adev); @@ -4957,10 +5256,18 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, if (r) return r; + r = amdgpu_xcp_restore_partition_mode( + tmp_adev->xcp_mgr); + if (r) + goto out; + r = amdgpu_device_ip_resume_phase2(tmp_adev); if (r) goto out; + if (tmp_adev->mman.buffer_funcs_ring->sched.ready) + amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true); + if (vram_lost) amdgpu_device_fill_reset_magic(tmp_adev); @@ -5159,11 +5466,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, int i, r = 0; bool need_emergency_restart = false; bool audio_suspended = false; - bool gpu_reset_for_dev_remove = false; - - gpu_reset_for_dev_remove = - test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) && - test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); /* * Special case: RAS triggered and full reset isn't supported @@ -5201,7 +5503,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) { list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { list_add_tail(&tmp_adev->reset_list, &device_list); - if (gpu_reset_for_dev_remove && adev->shutdown) + if (adev->shutdown) tmp_adev->shutdown = true; } if (!list_is_first(&adev->reset_list, &device_list)) @@ -5286,10 +5588,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, retry: /* Rest of adevs pre asic reset from XGMI hive. 
*/ list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - if (gpu_reset_for_dev_remove) { - /* Workaroud for ASICs need to disable SMC first */ - amdgpu_device_smu_fini_early(tmp_adev); - } r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context); /*TODO Should we stop ?*/ if (r) { @@ -5313,16 +5611,14 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ adev->asic_reset_res = r; /* Aldebaran and gfx_11_0_3 support ras in SRIOV, so need resume ras during reset */ - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || - adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(9, 4, 2) || + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) amdgpu_ras_resume(adev); } else { r = amdgpu_do_asic_reset(device_list_handle, reset_context); if (r && r == -EAGAIN) goto retry; - - if (!r && gpu_reset_for_dev_remove) - goto recover_end; } skip_hw_reset: @@ -5339,9 +5635,6 @@ skip_hw_reset: drm_sched_start(&ring->sched, true); } - if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)) - amdgpu_mes_self_test(tmp_adev); - if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); @@ -5381,7 +5674,6 @@ skip_sched_resume: amdgpu_ras_set_error_query_ready(tmp_adev, true); } -recover_end: tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, reset_list); amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain); @@ -6016,7 +6308,7 @@ bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev) return true; default: /* IP discovery */ - if (!adev->ip_versions[DCE_HWIP][0] || + if (!amdgpu_ip_version(adev, DCE_HWIP, 0) || (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) return false; return true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 68a901287..c7d60dd0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -35,10 +35,12 @@ #include "df_v1_7.h" #include "df_v3_6.h" #include "df_v4_3.h" +#include "df_v4_6_2.h" #include "nbio_v6_1.h" #include "nbio_v7_0.h" #include "nbio_v7_4.h" #include "nbio_v7_9.h" +#include "nbio_v7_11.h" #include "hdp_v4_0.h" #include "vega10_ih.h" #include "vega20_ih.h" @@ -80,6 +82,8 @@ #include "jpeg_v4_0.h" #include "vcn_v4_0_3.h" #include "jpeg_v4_0_3.h" +#include "vcn_v4_0_5.h" +#include "jpeg_v4_0_5.h" #include "amdgpu_vkms.h" #include "mes_v10_1.h" #include "mes_v11_0.h" @@ -89,6 +93,8 @@ #include "smuio_v13_0_3.h" #include "smuio_v13_0_6.h" +#include "amdgpu_vpe.h" + #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin" MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY); @@ -175,6 +181,7 @@ static const char *hw_id_names[HW_ID_MAX] = { [XGMI_HWID] = "XGMI", [XGBE_HWID] = "XGBE", [MP0_HWID] = "MP0", + [VPE_HWID] = "VPE", }; static int hw_id_map[MAX_HWIP] = { @@ -204,6 +211,7 @@ static int hw_id_map[MAX_HWIP] = { [XGMI_HWIP] = XGMI_HWID, [DCI_HWIP] = DCI_HWID, [PCIE_HWIP] = PCIE_HWID, + [VPE_HWIP] = VPE_HWID, }; static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary) @@ -323,8 +331,8 @@ static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev) * So far, apply this quirk only on those Navy Flounder boards which * have a bad harvest table of VCN config. 
*/ - if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) && - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) { + if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) && + (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) { switch (adev->pdev->revision) { case 0xC1: case 0xC2: @@ -674,7 +682,7 @@ struct ip_hw_instance { u8 harvest; int num_base_addresses; - u32 base_addr[]; + u32 base_addr[] __counted_by(num_base_addresses); }; struct ip_hw_id { @@ -1203,6 +1211,7 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev) static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) { + uint8_t num_base_address, subrev, variant; struct binary_header *bhdr; struct ip_discovery_header *ihdr; struct die_header *dhdr; @@ -1211,7 +1220,6 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) uint16_t ip_offset; uint16_t num_dies; uint16_t num_ips; - uint8_t num_base_address; int hw_ip; int i, j, k; int r; @@ -1349,8 +1357,22 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) * example. On most chips there are multiple instances * with the same HWID. */ - adev->ip_versions[hw_ip][ip->instance_number] = - IP_VERSION(ip->major, ip->minor, ip->revision); + + if (ihdr->version < 3) { + subrev = 0; + variant = 0; + } else { + subrev = ip->sub_revision; + variant = ip->variant; + } + + adev->ip_versions[hw_ip] + [ip->instance_number] = + IP_VERSION_FULL(ip->major, + ip->minor, + ip->revision, + variant, + subrev); } } @@ -1375,8 +1397,8 @@ static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) * so read harvest bit per IP data structure to set * harvest configuration. */ - if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0) && - adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) { + if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) && + amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3)) { if ((adev->pdev->device == 0x731E && (adev->pdev->revision == 0xC6 || adev->pdev->revision == 0xC7)) || @@ -1451,12 +1473,12 @@ static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) / le32_to_cpu(gc_info->v1.gc_num_sa_per_se); adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc); - if (gc_info->v1.header.version_minor >= 1) { + if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) { adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa); adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface); adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps); } - if (gc_info->v1.header.version_minor >= 2) { + if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) { adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg); adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size); adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp); @@ -1485,7 +1507,7 @@ static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) / le32_to_cpu(gc_info->v2.gc_num_sh_per_se); adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc); - if (gc_info->v2.header.version_minor == 1) { + if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) { adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh); 
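The switch to amdgpu_ip_version() and IP_VERSION_FULL() above packs major/minor/revision plus the new variant and sub_revision fields (carried by version-3 discovery binaries) into a single integer, so ranged checks like "GC >= 11.0.0" stay plain integer comparisons. A standalone demo; the shift layout mirrors the amdgpu headers in this kernel, so treat it as illustrative rather than authoritative:

    #include <stdint.h>
    #include <stdio.h>

    #define IP_VERSION_FULL(mj, mn, rv, var, srev) \
        (((uint32_t)(mj) << 24) | ((mn) << 16) | ((rv) << 8) | \
         ((var) << 4) | (srev))
    #define IP_VERSION(mj, mn, rv) IP_VERSION_FULL(mj, mn, rv, 0, 0)

    int main(void)
    {
        uint32_t gc_943  = IP_VERSION(9, 4, 3);
        uint32_t gc_1150 = IP_VERSION(11, 5, 0);

        /* Ordered comparisons work directly on the packed values. */
        printf("9.4.3  >= 11.0.0? %d\n", gc_943  >= IP_VERSION(11, 0, 0));
        printf("11.5.0 >= 11.0.0? %d\n", gc_1150 >= IP_VERSION(11, 0, 0));
        printf("raw: 0x%08x 0x%08x\n", gc_943, gc_1150);
        return 0;
    }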
adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu); adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */ @@ -1619,7 +1641,7 @@ static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev) static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) { /* what IP to use for this? */ - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 0, 1): case IP_VERSION(9, 1, 0): case IP_VERSION(9, 2, 1): @@ -1651,12 +1673,13 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 5, 0): amdgpu_device_ip_block_add(adev, &soc21_common_ip_block); break; default: dev_err(adev->dev, "Failed to add common ip block(GC_HWIP:0x%x)\n", - adev->ip_versions[GC_HWIP][0]); + amdgpu_ip_version(adev, GC_HWIP, 0)); return -EINVAL; } return 0; @@ -1665,7 +1688,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) { /* use GC or MMHUB IP version */ - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 0, 1): case IP_VERSION(9, 1, 0): case IP_VERSION(9, 2, 1): @@ -1697,12 +1720,12 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 5, 0): amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block); break; default: - dev_err(adev->dev, - "Failed to add gmc ip block(GC_HWIP:0x%x)\n", - adev->ip_versions[GC_HWIP][0]); + dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n", + amdgpu_ip_version(adev, GC_HWIP, 0)); return -EINVAL; } return 0; @@ -1710,7 +1733,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) { - switch (adev->ip_versions[OSSSYS_HWIP][0]) { + switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) { case IP_VERSION(4, 0, 0): case IP_VERSION(4, 0, 1): case IP_VERSION(4, 1, 0): @@ -1743,7 +1766,7 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) default: dev_err(adev->dev, "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n", - adev->ip_versions[OSSSYS_HWIP][0]); + amdgpu_ip_version(adev, OSSSYS_HWIP, 0)); return -EINVAL; } return 0; @@ -1751,7 +1774,7 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) { - switch (adev->ip_versions[MP0_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { case IP_VERSION(9, 0, 0): amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); break; @@ -1797,7 +1820,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) default: dev_err(adev->dev, "Failed to add psp ip block(MP0_HWIP:0x%x)\n", - adev->ip_versions[MP0_HWIP][0]); + amdgpu_ip_version(adev, MP0_HWIP, 0)); return -EINVAL; } return 0; @@ -1805,7 +1828,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) { - switch (adev->ip_versions[MP1_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(9, 0, 0): case IP_VERSION(10, 0, 0): case IP_VERSION(10, 0, 1): @@ -1843,10 +1866,13 @@ static int 
amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(13, 0, 11): amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); break; + case IP_VERSION(14, 0, 0): + amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block); + break; default: dev_err(adev->dev, "Failed to add smu ip block(MP1_HWIP:0x%x)\n", - adev->ip_versions[MP1_HWIP][0]); + amdgpu_ip_version(adev, MP1_HWIP, 0)); return -EINVAL; } return 0; @@ -1871,8 +1897,8 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) return 0; #if defined(CONFIG_DRM_AMD_DC) - if (adev->ip_versions[DCE_HWIP][0]) { - switch (adev->ip_versions[DCE_HWIP][0]) { + if (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): case IP_VERSION(2, 0, 2): @@ -1890,6 +1916,7 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): + case IP_VERSION(3, 5, 0): if (amdgpu_sriov_vf(adev)) amdgpu_discovery_set_sriov_display(adev); else @@ -1898,11 +1925,11 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) default: dev_err(adev->dev, "Failed to add dm ip block(DCE_HWIP:0x%x)\n", - adev->ip_versions[DCE_HWIP][0]); + amdgpu_ip_version(adev, DCE_HWIP, 0)); return -EINVAL; } - } else if (adev->ip_versions[DCI_HWIP][0]) { - switch (adev->ip_versions[DCI_HWIP][0]) { + } else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) { + switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) { case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1): case IP_VERSION(12, 1, 0): @@ -1914,7 +1941,7 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) default: dev_err(adev->dev, "Failed to add dm ip block(DCI_HWIP:0x%x)\n", - adev->ip_versions[DCI_HWIP][0]); + amdgpu_ip_version(adev, DCI_HWIP, 0)); return -EINVAL; } } @@ -1924,7 +1951,7 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 0, 1): case IP_VERSION(9, 1, 0): case IP_VERSION(9, 2, 1): @@ -1936,8 +1963,6 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); break; case IP_VERSION(9, 4, 3): - if (!amdgpu_exp_hw_support) - return -EINVAL; amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block); break; case IP_VERSION(10, 1, 10): @@ -1960,12 +1985,12 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 5, 0): amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block); break; default: - dev_err(adev->dev, - "Failed to add gfx ip block(GC_HWIP:0x%x)\n", - adev->ip_versions[GC_HWIP][0]); + dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n", + amdgpu_ip_version(adev, GC_HWIP, 0)); return -EINVAL; } return 0; @@ -1973,7 +1998,7 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) { - switch (adev->ip_versions[SDMA0_HWIP][0]) { + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { case IP_VERSION(4, 0, 0): case IP_VERSION(4, 0, 1): case IP_VERSION(4, 1, 0): @@ -2013,7 +2038,7 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) 
default: dev_err(adev->dev, "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n", - adev->ip_versions[SDMA0_HWIP][0]); + amdgpu_ip_version(adev, SDMA0_HWIP, 0)); return -EINVAL; } return 0; @@ -2021,8 +2046,8 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) { - if (adev->ip_versions[VCE_HWIP][0]) { - switch (adev->ip_versions[UVD_HWIP][0]) { + if (amdgpu_ip_version(adev, VCE_HWIP, 0)) { + switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { case IP_VERSION(7, 0, 0): case IP_VERSION(7, 2, 0): /* UVD is not supported on vega20 SR-IOV */ @@ -2032,10 +2057,10 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) default: dev_err(adev->dev, "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n", - adev->ip_versions[UVD_HWIP][0]); + amdgpu_ip_version(adev, UVD_HWIP, 0)); return -EINVAL; } - switch (adev->ip_versions[VCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) { case IP_VERSION(4, 0, 0): case IP_VERSION(4, 1, 0): /* VCE is not supported on vega20 SR-IOV */ @@ -2045,11 +2070,11 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) default: dev_err(adev->dev, "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n", - adev->ip_versions[VCE_HWIP][0]); + amdgpu_ip_version(adev, VCE_HWIP, 0)); return -EINVAL; } } else { - switch (adev->ip_versions[UVD_HWIP][0]) { + switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); @@ -2093,10 +2118,14 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block); amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block); break; + case IP_VERSION(4, 0, 5): + amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block); + amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block); + break; default: dev_err(adev->dev, "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n", - adev->ip_versions[UVD_HWIP][0]); + amdgpu_ip_version(adev, UVD_HWIP, 0)); return -EINVAL; } } @@ -2105,7 +2134,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 1, 10): case IP_VERSION(10, 1, 1): case IP_VERSION(10, 1, 2): @@ -2130,6 +2159,7 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 5, 0): amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block); adev->enable_mes = true; adev->enable_mes_kiq = true; @@ -2142,7 +2172,7 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 4, 3): aqua_vanjaram_init_soc_config(adev); break; @@ -2151,6 +2181,35 @@ static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev) } } +static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev) +{ + switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) { + case IP_VERSION(6, 1, 0): + amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block); + break; + default: + break; + } + + return 0; +} + +static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev) 
+{ + switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) { + case IP_VERSION(4, 0, 5): + if (amdgpu_umsch_mm & 0x1) { + amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block); + adev->enable_umsch_mm = true; + } + break; + default: + break; + } + + return 0; +} + int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) { int r; @@ -2331,7 +2390,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) amdgpu_discovery_init_soc_config(adev); amdgpu_discovery_sysfs_init(adev); - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 0, 1): case IP_VERSION(9, 2, 1): case IP_VERSION(9, 4, 0): @@ -2378,11 +2437,14 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(11, 0, 4): adev->family = AMDGPU_FAMILY_GC_11_0_1; break; + case IP_VERSION(11, 5, 0): + adev->family = AMDGPU_FAMILY_GC_11_5_0; + break; default: return -EINVAL; } - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 1, 0): case IP_VERSION(9, 2, 2): case IP_VERSION(9, 3, 0): @@ -2394,17 +2456,21 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(10, 3, 7): case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 5, 0): adev->flags |= AMD_IS_APU; break; default: break; } - if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0)) + if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0)) adev->gmc.xgmi.supported = true; + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) + adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0); + /* set NBIO version */ - switch (adev->ip_versions[NBIO_HWIP][0]) { + switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { case IP_VERSION(6, 1, 0): case IP_VERSION(6, 2, 0): adev->nbio.funcs = &nbio_v6_1_funcs; @@ -2426,6 +2492,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->nbio.funcs = &nbio_v7_9_funcs; adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg; break; + case IP_VERSION(7, 11, 0): + adev->nbio.funcs = &nbio_v7_11_funcs; + adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg; + break; case IP_VERSION(7, 2, 0): case IP_VERSION(7, 2, 1): case IP_VERSION(7, 3, 0): @@ -2462,7 +2532,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) break; } - switch (adev->ip_versions[HDP_HWIP][0]) { + switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) { case IP_VERSION(4, 0, 0): case IP_VERSION(4, 0, 1): case IP_VERSION(4, 1, 0): @@ -2494,7 +2564,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) break; } - switch (adev->ip_versions[DF_HWIP][0]) { + switch (amdgpu_ip_version(adev, DF_HWIP, 0)) { case IP_VERSION(3, 6, 0): case IP_VERSION(3, 6, 1): case IP_VERSION(3, 6, 2): @@ -2510,11 +2580,14 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(4, 3, 0): adev->df.funcs = &df_v4_3_funcs; break; + case IP_VERSION(4, 6, 2): + adev->df.funcs = &df_v4_6_2_funcs; + break; default: break; } - switch (adev->ip_versions[SMUIO_HWIP][0]) { + switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) { case IP_VERSION(9, 0, 0): case IP_VERSION(9, 0, 1): case IP_VERSION(10, 0, 0): @@ -2557,7 +2630,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) break; } - switch (adev->ip_versions[LSDMA_HWIP][0]) { + switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) { case IP_VERSION(6, 0, 0): case IP_VERSION(6, 0, 1): case IP_VERSION(6, 0, 2): @@ -2630,6 +2703,14 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) if 
(r) return r; + r = amdgpu_discovery_set_vpe_ip_blocks(adev); + if (r) + return r; + + r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev); + if (r) + return r; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h index 3a2f347bd..4d03cd5b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h @@ -24,7 +24,7 @@ #ifndef __AMDGPU_DISCOVERY__ #define __AMDGPU_DISCOVERY__ -#define DISCOVERY_TMR_SIZE (8 << 10) +#define DISCOVERY_TMR_SIZE (10 << 10) #define DISCOVERY_TMR_OFFSET (64 << 10) void amdgpu_discovery_fini(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 578aeba49..b8fbe97ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -763,11 +763,13 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb) return -EINVAL; } - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) version = AMD_FMT_MOD_TILE_VER_GFX11; - else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) + else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= + IP_VERSION(10, 3, 0)) version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS; - else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0)) + else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= + IP_VERSION(10, 0, 0)) version = AMD_FMT_MOD_TILE_VER_GFX10; else version = AMD_FMT_MOD_TILE_VER_GFX9; @@ -776,13 +778,15 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb) case 0: /* Z microtiling */ return -EINVAL; case 1: /* S microtiling */ - if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0)) { + if (amdgpu_ip_version(adev, GC_HWIP, 0) < + IP_VERSION(11, 0, 0)) { if (!has_xor) version = AMD_FMT_MOD_TILE_VER_GFX9; } break; case 2: - if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0)) { + if (amdgpu_ip_version(adev, GC_HWIP, 0) < + IP_VERSION(11, 0, 0)) { if (!has_xor && afb->base.format->cpp[0] != 4) version = AMD_FMT_MOD_TILE_VER_GFX9; } @@ -835,10 +839,12 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb) u64 render_dcc_offset; /* Enable constant encode on RAVEN2 and later. */ - bool dcc_constant_encode = (adev->asic_type > CHIP_RAVEN || - (adev->asic_type == CHIP_RAVEN && - adev->external_rev_id >= 0x81)) && - adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0); + bool dcc_constant_encode = + (adev->asic_type > CHIP_RAVEN || + (adev->asic_type == CHIP_RAVEN && + adev->external_rev_id >= 0x81)) && + amdgpu_ip_version(adev, GC_HWIP, 0) < + IP_VERSION(11, 0, 0); int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B : dcc_i128b ? 
AMD_FMT_MOD_DCC_BLOCK_128B : @@ -875,7 +881,9 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb) if (adev->family >= AMDGPU_FAMILY_NV) { int extra_pipe = 0; - if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) && + if ((amdgpu_ip_version(adev, GC_HWIP, + 0) >= + IP_VERSION(10, 3, 0)) && pipes == packers && pipes > 1) extra_pipe = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index ba3a87cb8..e7e87a3b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -331,6 +331,7 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf) flags |= other->flags & (AMDGPU_GEM_CREATE_CPU_GTT_USWC | AMDGPU_GEM_CREATE_COHERENT | + AMDGPU_GEM_CREATE_EXT_COHERENT | AMDGPU_GEM_CREATE_UNCACHED); } @@ -408,7 +409,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach) if (!r) r = amdgpu_vm_clear_freed(adev, vm, NULL); if (!r) - r = amdgpu_vm_handle_moved(adev, vm); + r = amdgpu_vm_handle_moved(adev, vm, ticket); if (r && r != -EBUSY) DRM_ERROR("Failed to invalidate VM page tables (%d))\n", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h index 4a8b33f55..2675689ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h @@ -86,6 +86,7 @@ struct amdgpu_doorbell_index { uint32_t vce_ring6_7; } uvd_vce; }; + uint32_t vpe_ring; uint32_t first_non_cp; uint32_t last_non_cp; uint32_t max_assignment; @@ -226,10 +227,12 @@ enum AMDGPU_NAVI10_DOORBELL_ASSIGNMENT { AMDGPU_NAVI10_DOORBELL64_VCNc_d = 0x18E, AMDGPU_NAVI10_DOORBELL64_VCNe_f = 0x18F, + AMDGPU_NAVI10_DOORBELL64_VPE = 0x190, + AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0, - AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP = AMDGPU_NAVI10_DOORBELL64_VCNe_f, + AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP = AMDGPU_NAVI10_DOORBELL64_VPE, - AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT = 0x18F, + AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT = AMDGPU_NAVI10_DOORBELL64_VPE, AMDGPU_NAVI10_DOORBELL_INVALID = 0xFFFF }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 2c35036e4..10c4a8cfa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -113,11 +113,23 @@ * gl1c_cache_size, gl2c_cache_size, mall_size, enabled_rb_pipes_mask_hi * 3.53.0 - Support for GFX11 CP GFX shadowing * 3.54.0 - Add AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS support + * - 3.55.0 - Add AMDGPU_INFO_GPUVM_FAULT query + * - 3.56.0 - Update IB start address and size alignment for decode and encode + * - 3.57.0 - Compute tunneling on GFX10+ */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 54 +#define KMS_DRIVER_MINOR 57 #define KMS_DRIVER_PATCHLEVEL 0 +/* + * amdgpu.debug module options. 
All are disabled by default + */ +enum AMDGPU_DEBUG_MASK { + AMDGPU_DEBUG_VM = BIT(0), + AMDGPU_DEBUG_LARGEBAR = BIT(1), + AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY = BIT(2), +}; + unsigned int amdgpu_vram_limit = UINT_MAX; int amdgpu_vis_vram_limit; int amdgpu_gart_size = -1; /* auto */ @@ -140,7 +152,6 @@ int amdgpu_vm_size = -1; int amdgpu_vm_fragment_size = -1; int amdgpu_vm_block_size = -1; int amdgpu_vm_fault_stop; -int amdgpu_vm_debug; int amdgpu_vm_update_mode = -1; int amdgpu_exp_hw_support; int amdgpu_dc = -1; @@ -194,6 +205,10 @@ int amdgpu_use_xgmi_p2p = 1; int amdgpu_vcnfw_log; int amdgpu_sg_display = -1; /* auto */ int amdgpu_user_partt_mode = AMDGPU_AUTO_COMPUTE_PARTITION_MODE; +int amdgpu_umsch_mm; +int amdgpu_seamless = -1; /* auto */ +uint amdgpu_debug_mask; +int amdgpu_agp = -1; /* auto */ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work); @@ -405,13 +420,6 @@ module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444); MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = print first, 2 = always)"); module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444); -/** - * DOC: vm_debug (int) - * Debug VM handling (0 = disabled, 1 = enabled). The default is 0 (Disabled). - */ -MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)"); -module_param_named(vm_debug, amdgpu_vm_debug, int, 0644); - /** * DOC: vm_update_mode (int) * Override VM update mode. VM updated by using CPU (0 = never, 1 = Graphics only, 2 = Compute only, 3 = Both). The default @@ -743,18 +751,6 @@ module_param(send_sigterm, int, 0444); MODULE_PARM_DESC(send_sigterm, "Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)"); -/** - * DOC: debug_largebar (int) - * Set debug_largebar as 1 to enable simulating large-bar capability on non-large bar - * system. This limits the VRAM size reported to ROCm applications to the visible - * size, usually 256MB. - * Default value is 0, diabled. - */ -int debug_largebar; -module_param(debug_largebar, int, 0444); -MODULE_PARM_DESC(debug_largebar, - "Debug large-bar flag used to simulate large-bar capability on non-large bar machine (0 = disable, 1 = enable)"); - /** * DOC: halt_if_hws_hang (int) * Halt if HWS hang is detected. Default value, 0, disables the halt on hang. @@ -907,6 +903,15 @@ module_param_named(vcnfw_log, amdgpu_vcnfw_log, int, 0444); MODULE_PARM_DESC(sg_display, "S/G Display (-1 = auto (default), 0 = disable)"); module_param_named(sg_display, amdgpu_sg_display, int, 0444); +/** + * DOC: umsch_mm (int) + * Enable Multi Media User Mode Scheduler. This is a HW scheduling engine for VCN and VPE. + * (0 = disabled (default), 1 = enabled) + */ +MODULE_PARM_DESC(umsch_mm, + "Enable Multi Media User Mode Scheduler (0 = disabled (default), 1 = enabled)"); +module_param_named(umsch_mm, amdgpu_umsch_mm, int, 0444); + /** * DOC: smu_pptable_id (int) * Used to override pptable id. id = 0 use VBIOS pptable. @@ -938,6 +943,35 @@ module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444); module_param(enforce_isolation, bool, 0444); MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute. enforce_isolation = on"); +/** + * DOC: seamless (int) + * Seamless boot will keep the image on the screen during the boot process.
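/*
 * A minimal user-space sketch of how the amdgpu.debug_mask bits introduced
 * above combine. It reuses the BIT() layout of the AMDGPU_DEBUG_MASK enum
 * from this patch; the main() driver and the sample mask are fabricated.
 */
#include <stdio.h>

#define BIT(n) (1u << (n))

enum {
	DBG_VM            = BIT(0), /* AMDGPU_DEBUG_VM */
	DBG_LARGEBAR      = BIT(1), /* AMDGPU_DEBUG_LARGEBAR */
	DBG_NO_SOFT_RESET = BIT(2), /* AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY */
};

int main(void)
{
	/* e.g. booting with amdgpu.debug_mask=0x5 on the kernel command line */
	unsigned int mask = 0x5;

	/* the same bit tests amdgpu_init_debug_options() applies */
	printf("vm debug:            %s\n", (mask & DBG_VM) ? "on" : "off");
	printf("largebar simulation: %s\n", (mask & DBG_LARGEBAR) ? "on" : "off");
	printf("gpu soft recovery:   %s\n", (mask & DBG_NO_SOFT_RESET) ? "off" : "on");
	return 0;
}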
+ */ +MODULE_PARM_DESC(seamless, "Seamless boot (-1 = auto (default), 0 = disable, 1 = enable)"); +module_param_named(seamless, amdgpu_seamless, int, 0444); + +/** + * DOC: debug_mask (uint) + * Debug options for amdgpu, work as a binary mask with the following options: + * + * - 0x1: Debug VM handling + * - 0x2: Enable simulating large-bar capability on non-large bar system. This + * limits the VRAM size reported to ROCm applications to the visible + * size, usually 256MB. + * - 0x4: Disable GPU soft recovery, always do a full reset + */ +MODULE_PARM_DESC(debug_mask, "debug options for amdgpu, disabled by default"); +module_param_named(debug_mask, amdgpu_debug_mask, uint, 0444); + +/** + * DOC: agp (int) + * Enable the AGP aperture. This provides an aperture in the GPU's internal + * address space for direct access to system memory. Note that these accesses + * are non-snooped, so they are only used for access to uncached memory. + */ +MODULE_PARM_DESC(agp, "AGP (-1 = auto (default), 0 = disable, 1 = enable)"); +module_param_named(agp, amdgpu_agp, int, 0444); + /* These devices are not supported by amdgpu. * They are supported by the mach64, r128, radeon drivers */ @@ -2018,6 +2052,14 @@ static const struct pci_device_id pciidlist[] = { MODULE_DEVICE_TABLE(pci, pciidlist); +static const struct amdgpu_asic_type_quirk asic_type_quirks[] = { + /* differentiate between P10 and P11 asics with the same DID */ + {0x67FF, 0xE3, CHIP_POLARIS10}, + {0x67FF, 0xE7, CHIP_POLARIS10}, + {0x67FF, 0xF3, CHIP_POLARIS10}, + {0x67FF, 0xF7, CHIP_POLARIS10}, +}; + static const struct drm_driver amdgpu_kms_driver; static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev) @@ -2042,6 +2084,40 @@ static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev) } } +static void amdgpu_init_debug_options(struct amdgpu_device *adev) +{ + if (amdgpu_debug_mask & AMDGPU_DEBUG_VM) { + pr_info("debug: VM handling debug enabled\n"); + adev->debug_vm = true; + } + + if (amdgpu_debug_mask & AMDGPU_DEBUG_LARGEBAR) { + pr_info("debug: enabled simulating large-bar capability on non-large bar system\n"); + adev->debug_largebar = true; + } + + if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY) { + pr_info("debug: soft reset for GPU recovery disabled\n"); + adev->debug_disable_soft_recovery = true; + } +} + +static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(asic_type_quirks); i++) { + if (pdev->device == asic_type_quirks[i].device && + pdev->revision == asic_type_quirks[i].revision) { + flags &= ~AMD_ASIC_MASK; + flags |= asic_type_quirks[i].type; + break; + } + } + + return flags; +} + static int amdgpu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -2069,15 +2145,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, "See modparam exp_hw_support\n"); return -ENODEV; } - /* differentiate between P10 and P11 asics with the same DID */ - if (pdev->device == 0x67FF && - (pdev->revision == 0xE3 || - pdev->revision == 0xE7 || - pdev->revision == 0xF3 || - pdev->revision == 0xF7)) { - flags &= ~AMD_ASIC_MASK; - flags |= CHIP_POLARIS10; - } + + flags = amdgpu_fix_asic_type(pdev, flags); /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping, * however, SME requires an indirect IOMMU mapping because the encryption @@ -2222,6 +2291,8 @@ retry_init: amdgpu_get_secondary_funcs(adev); } + amdgpu_init_debug_options(adev); + return 0; err_pci: @@ -2243,38 +2314,6 @@ amdgpu_pci_remove(struct pci_dev 
*pdev) pm_runtime_forbid(dev->dev); } - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) && - !amdgpu_sriov_vf(adev)) { - bool need_to_reset_gpu = false; - - if (adev->gmc.xgmi.num_physical_nodes > 1) { - struct amdgpu_hive_info *hive; - - hive = amdgpu_get_xgmi_hive(adev); - if (hive->device_remove_count == 0) - need_to_reset_gpu = true; - hive->device_remove_count++; - amdgpu_put_xgmi_hive(hive); - } else { - need_to_reset_gpu = true; - } - - /* Workaround for ASICs need to reset SMU. - * Called only when the first device is removed. - */ - if (need_to_reset_gpu) { - struct amdgpu_reset_context reset_context; - - adev->shutdown = true; - memset(&reset_context, 0, sizeof(reset_context)); - reset_context.method = AMD_RESET_METHOD_NONE; - reset_context.reset_req_dev = adev; - set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - set_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context.flags); - amdgpu_device_gpu_recover(adev, NULL, &reset_context); - } - } - amdgpu_driver_unload_kms(dev); /* @@ -2386,8 +2425,9 @@ static int amdgpu_pmops_prepare(struct device *dev) /* Return a positive number here so * DPM_FLAG_SMART_SUSPEND works properly */ - if (amdgpu_device_supports_boco(drm_dev)) - return pm_runtime_suspended(dev); + if (amdgpu_device_supports_boco(drm_dev) && + pm_runtime_suspended(dev)) + return 1; /* if we will not support s3 or s2i for the device * then skip suspend @@ -2396,7 +2436,7 @@ static int amdgpu_pmops_prepare(struct device *dev) !amdgpu_acpi_is_s3_active(adev)) return 1; - return 0; + return amdgpu_device_prepare(drm_dev); } static void amdgpu_pmops_complete(struct device *dev) @@ -2409,6 +2449,7 @@ static int amdgpu_pmops_suspend(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); + adev->suspend_complete = false; if (amdgpu_acpi_is_s0ix_active(adev)) adev->in_s0ix = true; else if (amdgpu_acpi_is_s3_active(adev)) @@ -2423,6 +2464,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); + adev->suspend_complete = true; if (amdgpu_acpi_should_gpu_reset(adev)) return amdgpu_asic_reset(adev); @@ -2596,6 +2638,9 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) if (amdgpu_device_supports_boco(drm_dev)) adev->mp1_state = PP_MP1_STATE_UNLOAD; + ret = amdgpu_device_prepare(drm_dev); + if (ret) + return ret; ret = amdgpu_device_suspend(drm_dev, false); if (ret) { adev->in_runpm = false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c index 6038b5021..5706b282a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c @@ -51,25 +51,20 @@ static const char *amdgpu_ip_name[AMDGPU_HW_IP_NUM] = { [AMDGPU_HW_IP_VCN_DEC] = "dec", [AMDGPU_HW_IP_VCN_ENC] = "enc", [AMDGPU_HW_IP_VCN_JPEG] = "jpeg", + [AMDGPU_HW_IP_VPE] = "vpe", }; void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file) { - struct amdgpu_device *adev = drm_to_adev(file->minor->dev); struct amdgpu_fpriv *fpriv = file->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_mem_stats stats; ktime_t usage[AMDGPU_HW_IP_NUM]; - uint32_t bus, dev, fn, domain; unsigned int hw_ip; int ret; memset(&stats, 0, sizeof(stats)); - bus = adev->pdev->bus->number; - domain = pci_domain_nr(adev->pdev->bus); - dev = PCI_SLOT(adev->pdev->devfn); - fn = PCI_FUNC(adev->pdev->devfn); ret = amdgpu_bo_reserve(vm->root.bo, false); if 
(ret) @@ -87,9 +82,6 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file) */ drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid); - drm_printf(p, "drm-driver:\t%s\n", file->minor->dev->driver->name); - drm_printf(p, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn); - drm_printf(p, "drm-client-id:\t%llu\n", vm->immediate.fence_context); drm_printf(p, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL); drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL); drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 7537f5aa7..dc2302127 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -570,7 +570,8 @@ static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring) switch (ring->funcs->type) { case AMDGPU_RING_TYPE_SDMA: /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */ - if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) + if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >= + IP_VERSION(5, 0, 0)) is_gfx_power_domain = true; break; case AMDGPU_RING_TYPE_GFX: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c index 7cd0dfaee..a08c148b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c @@ -42,8 +42,9 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr) /* The i2c access is blocked on VF * TODO: Need other way to get the info + * Also, FRU not valid for APU devices. */ - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) return false; /* The default I2C EEPROM address of the FRU. @@ -57,27 +58,26 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr) * for ease/speed/readability. 
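/*
 * The "string comparisons" referred to here are bounded substring searches
 * over the VBIOS part number. A self-contained sketch of that check; the
 * kernel's strnstr() is modeled with a local helper, and the part-number
 * string below is made up.
 */
#include <stdio.h>
#include <string.h>

/* minimal stand-in for the kernel's strnstr(haystack, needle, len) */
static const char *strnstr_model(const char *haystack, const char *needle,
				 size_t len)
{
	size_t nlen = strlen(needle);
	size_t i;

	for (i = 0; i + nlen <= len && haystack[i]; i++)
		if (!strncmp(haystack + i, needle, nlen))
			return haystack + i;
	return NULL;
}

int main(void)
{
	char vbios_pn[20] = "113-D1631400-X11";	/* fabricated part number */

	/* mirrors the D161/D163 Vega20 server-SKU test in the code above */
	if (strnstr_model(vbios_pn, "D161", sizeof(vbios_pn)) ||
	    strnstr_model(vbios_pn, "D163", sizeof(vbios_pn)))
		printf("server SKU: FRU EEPROM expected at FRU_EEPROM_MADDR_6\n");
	else
		printf("no FRU EEPROM on this SKU\n");
	return 0;
}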
For now, 2 string comparisons are * reasonable and not too expensive */ - switch (adev->asic_type) { - case CHIP_VEGA20: - /* D161 and D163 are the VG20 server SKUs */ - if (strnstr(atom_ctx->vbios_pn, "D161", - sizeof(atom_ctx->vbios_pn)) || - strnstr(atom_ctx->vbios_pn, "D163", - sizeof(atom_ctx->vbios_pn))) { - if (fru_addr) - *fru_addr = FRU_EEPROM_MADDR_6; - return true; - } else { + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { + case IP_VERSION(11, 0, 2): + switch (adev->asic_type) { + case CHIP_VEGA20: + /* D161 and D163 are the VG20 server SKUs */ + if (strnstr(atom_ctx->vbios_pn, "D161", + sizeof(atom_ctx->vbios_pn)) || + strnstr(atom_ctx->vbios_pn, "D163", + sizeof(atom_ctx->vbios_pn))) { + if (fru_addr) + *fru_addr = FRU_EEPROM_MADDR_6; + return true; + } else { + return false; + } + case CHIP_ARCTURUS: + default: return false; } - case CHIP_ALDEBARAN: - /* All Aldebaran SKUs have an FRU */ - if (!strnstr(atom_ctx->vbios_pn, "D673", - sizeof(atom_ctx->vbios_pn))) - if (fru_addr) - *fru_addr = FRU_EEPROM_MADDR_6; - return true; - case CHIP_SIENNA_CICHLID: + case IP_VERSION(11, 0, 7): if (strnstr(atom_ctx->vbios_pn, "D603", sizeof(atom_ctx->vbios_pn))) { if (strnstr(atom_ctx->vbios_pn, "D603GLXE", @@ -92,6 +92,17 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr) } else { return false; } + case IP_VERSION(13, 0, 2): + /* All Aldebaran SKUs have an FRU */ + if (!strnstr(atom_ctx->vbios_pn, "D673", + sizeof(atom_ctx->vbios_pn))) + if (fru_addr) + *fru_addr = FRU_EEPROM_MADDR_6; + return true; + case IP_VERSION(13, 0, 6): + if (fru_addr) + *fru_addr = FRU_EEPROM_MADDR_8; + return true; default: return false; } @@ -99,6 +110,7 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr) int amdgpu_fru_get_product_info(struct amdgpu_device *adev) { + struct amdgpu_fru_info *fru_info; unsigned char buf[8], *pia; u32 addr, fru_addr; int size, len; @@ -107,6 +119,19 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) if (!is_fru_eeprom_supported(adev, &fru_addr)) return 0; + if (!adev->fru_info) { + adev->fru_info = kzalloc(sizeof(*adev->fru_info), GFP_KERNEL); + if (!adev->fru_info) + return -ENOMEM; + } + + fru_info = adev->fru_info; + /* For Arcturus-and-later, default value of serial_number is unique_id + * so convert it to a 16-digit HEX string for convenience and + * backwards-compatibility. + */ + sprintf(fru_info->serial, "%llx", adev->unique_id); + /* If algo exists, it means that the i2c_adapter's initialized */ if (!adev->pm.fru_eeprom_i2c_bus || !adev->pm.fru_eeprom_i2c_bus->algo) { DRM_WARN("Cannot access FRU, EEPROM accessor not initialized"); @@ -176,27 +201,33 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) /* Now extract useful information from the PIA. * - * Skip the Manufacturer Name at [3] and go directly to - * the Product Name field. + * Read Manufacturer Name field whose length is [3]. */ - addr = 3 + 1 + (pia[3] & 0x3F); + addr = 3; if (addr + 1 >= len) goto Out; - memcpy(adev->product_name, pia + addr + 1, - min_t(size_t, - sizeof(adev->product_name), + memcpy(fru_info->manufacturer_name, pia + addr + 1, + min_t(size_t, sizeof(fru_info->manufacturer_name), pia[addr] & 0x3F)); - adev->product_name[sizeof(adev->product_name) - 1] = '\0'; + fru_info->manufacturer_name[sizeof(fru_info->manufacturer_name) - 1] = + '\0'; + + /* Read Product Name field. 
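/*
 * The address arithmetic in the hunk above follows the IPMI FRU type/length
 * encoding: each field starts with one byte whose low six bits (mask 0x3F)
 * give the field length, so the next field begins at
 * addr += 1 + (pia[addr] & 0x3F). A self-contained model of that walk over
 * a fabricated product info area:
 */
#include <stdio.h>
#include <string.h>

static void read_field(const unsigned char *pia, size_t len, size_t *addr,
		       char *out, size_t out_sz)
{
	size_t flen = pia[*addr] & 0x3F;	/* low 6 bits = data length */

	if (*addr + 1 + flen > len || flen + 1 > out_sz) {
		out[0] = '\0';
		return;
	}
	memcpy(out, pia + *addr + 1, flen);
	out[flen] = '\0';
	*addr += 1 + flen;			/* next type/length byte */
}

int main(void)
{
	/* 3 header bytes, then a 3-byte and a 9-byte field (fabricated) */
	const unsigned char pia[] = {
		0x01, 0x04, 0x00,
		0xC3, 'A', 'M', 'D',
		0xC9, 'D', 'e', 'm', 'o', 'B', 'o', 'a', 'r', 'd',
	};
	size_t addr = 3;	/* manufacturer name field starts at [3] */
	char buf[32];

	read_field(pia, sizeof(pia), &addr, buf, sizeof(buf));
	printf("manufacturer: %s\n", buf);
	read_field(pia, sizeof(pia), &addr, buf, sizeof(buf));
	printf("product name: %s\n", buf);
	return 0;
}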
*/ + addr += 1 + (pia[addr] & 0x3F); + if (addr + 1 >= len) + goto Out; + memcpy(fru_info->product_name, pia + addr + 1, + min_t(size_t, sizeof(fru_info->product_name), pia[addr] & 0x3F)); + fru_info->product_name[sizeof(fru_info->product_name) - 1] = '\0'; /* Go to the Product Part/Model Number field. */ addr += 1 + (pia[addr] & 0x3F); if (addr + 1 >= len) goto Out; - memcpy(adev->product_number, pia + addr + 1, - min_t(size_t, - sizeof(adev->product_number), + memcpy(fru_info->product_number, pia + addr + 1, + min_t(size_t, sizeof(fru_info->product_number), pia[addr] & 0x3F)); - adev->product_number[sizeof(adev->product_number) - 1] = '\0'; + fru_info->product_number[sizeof(fru_info->product_number) - 1] = '\0'; /* Go to the Product Version field. */ addr += 1 + (pia[addr] & 0x3F); @@ -205,10 +236,21 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) addr += 1 + (pia[addr] & 0x3F); if (addr + 1 >= len) goto Out; - memcpy(adev->serial, pia + addr + 1, min_t(size_t, - sizeof(adev->serial), - pia[addr] & 0x3F)); - adev->serial[sizeof(adev->serial) - 1] = '\0'; + memcpy(fru_info->serial, pia + addr + 1, + min_t(size_t, sizeof(fru_info->serial), pia[addr] & 0x3F)); + fru_info->serial[sizeof(fru_info->serial) - 1] = '\0'; + + /* Asset Tag field */ + addr += 1 + (pia[addr] & 0x3F); + + /* FRU File Id field. This could be 'null'. */ + addr += 1 + (pia[addr] & 0x3F); + if ((addr + 1 >= len) || !(pia[addr] & 0x3F)) + goto Out; + memcpy(fru_info->fru_id, pia + addr + 1, + min_t(size_t, sizeof(fru_info->fru_id), pia[addr] & 0x3F)); + fru_info->fru_id[sizeof(fru_info->fru_id) - 1] = '\0'; + Out: kfree(pia); return 0; @@ -231,7 +273,7 @@ static ssize_t amdgpu_fru_product_name_show(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - return sysfs_emit(buf, "%s\n", adev->product_name); + return sysfs_emit(buf, "%s\n", adev->fru_info->product_name); } static DEVICE_ATTR(product_name, 0444, amdgpu_fru_product_name_show, NULL); @@ -253,7 +295,7 @@ static ssize_t amdgpu_fru_product_number_show(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - return sysfs_emit(buf, "%s\n", adev->product_number); + return sysfs_emit(buf, "%s\n", adev->fru_info->product_number); } static DEVICE_ATTR(product_number, 0444, amdgpu_fru_product_number_show, NULL); @@ -275,21 +317,65 @@ static ssize_t amdgpu_fru_serial_number_show(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - return sysfs_emit(buf, "%s\n", adev->serial); + return sysfs_emit(buf, "%s\n", adev->fru_info->serial); } static DEVICE_ATTR(serial_number, 0444, amdgpu_fru_serial_number_show, NULL); +/** + * DOC: fru_id + * + * The amdgpu driver provides a sysfs API for reporting FRU File Id + * for the device. + * The file fru_id is used for this and returns the File Id value + * as returned from the FRU. + * NOTE: This is only available for certain server cards + */ + +static ssize_t amdgpu_fru_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + return sysfs_emit(buf, "%s\n", adev->fru_info->fru_id); +} + +static DEVICE_ATTR(fru_id, 0444, amdgpu_fru_id_show, NULL); + +/** + * DOC: manufacturer + * + * The amdgpu driver provides a sysfs API for reporting manufacturer name from + * FRU information. 
+ * The file manufacturer returns the value as returned from the FRU. + * NOTE: This is only available for certain server cards + */ + +static ssize_t amdgpu_fru_manufacturer_name_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + return sysfs_emit(buf, "%s\n", adev->fru_info->manufacturer_name); +} + +static DEVICE_ATTR(manufacturer, 0444, amdgpu_fru_manufacturer_name_show, NULL); + static const struct attribute *amdgpu_fru_attributes[] = { &dev_attr_product_name.attr, &dev_attr_product_number.attr, &dev_attr_serial_number.attr, + &dev_attr_fru_id.attr, + &dev_attr_manufacturer.attr, NULL }; int amdgpu_fru_sysfs_init(struct amdgpu_device *adev) { - if (!is_fru_eeprom_supported(adev, NULL)) + if (!is_fru_eeprom_supported(adev, NULL) || !adev->fru_info) return 0; return sysfs_create_files(&adev->dev->kobj, amdgpu_fru_attributes); @@ -297,7 +383,7 @@ int amdgpu_fru_sysfs_init(struct amdgpu_device *adev) void amdgpu_fru_sysfs_fini(struct amdgpu_device *adev) { - if (!is_fru_eeprom_supported(adev, NULL)) + if (!is_fru_eeprom_supported(adev, NULL) || !adev->fru_info) return; sysfs_remove_files(&adev->dev->kobj, amdgpu_fru_attributes); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h index c817db17c..bc58dca18 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h @@ -24,6 +24,17 @@ #ifndef __AMDGPU_FRU_EEPROM_H__ #define __AMDGPU_FRU_EEPROM_H__ +#define AMDGPU_PRODUCT_NAME_LEN 64 + +/* FRU product information */ +struct amdgpu_fru_info { + char product_number[20]; + char product_name[AMDGPU_PRODUCT_NAME_LEN]; + char serial[20]; + char manufacturer_name[32]; + char fru_id[32]; +}; + int amdgpu_fru_get_product_info(struct amdgpu_device *adev); int amdgpu_fru_sysfs_init(struct amdgpu_device *adev); void amdgpu_fru_sysfs_fini(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 73b8cca35..c623e2304 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -121,6 +121,7 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev) struct amdgpu_bo_param bp; dma_addr_t dma_addr; struct page *p; + unsigned long x; int ret; if (adev->gart.bo != NULL) @@ -130,6 +131,10 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev) if (!p) return -ENOMEM; + /* assign pages to this device */ + for (x = 0; x < (1UL << order); x++) + p[x].mapping = adev->mman.bdev.dev_mapping; + /* If the hardware does not support UTCL2 snooping of the CPU caches * then set_memory_wc() could be used as a workaround to mark the pages * as write combine memory. 
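/*
 * With the two attributes added above, a supported card exposes five
 * FRU-derived files in sysfs. A short user-space sketch that dumps them;
 * the card0 path is an assumption and varies per system.
 */
#include <stdio.h>

static void dump_attr(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f)
		return;		/* attribute absent on unsupported SKUs */
	if (fgets(buf, sizeof(buf), f))
		printf("%-50s %s", path, buf);	/* sysfs values end in '\n' */
	fclose(f);
}

int main(void)
{
	dump_attr("/sys/class/drm/card0/device/product_name");
	dump_attr("/sys/class/drm/card0/device/product_number");
	dump_attr("/sys/class/drm/card0/device/serial_number");
	dump_attr("/sys/class/drm/card0/device/fru_id");
	dump_attr("/sys/class/drm/card0/device/manufacturer");
	return 0;
}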
@@ -223,6 +228,7 @@ void amdgpu_gart_table_ram_free(struct amdgpu_device *adev) unsigned int order = get_order(adev->gart.table_size); struct sg_table *sg = adev->gart.bo->tbo.sg; struct page *p; + unsigned long x; int ret; ret = amdgpu_bo_reserve(adev->gart.bo, false); @@ -234,6 +240,8 @@ void amdgpu_gart_table_ram_free(struct amdgpu_device *adev) sg_free_table(sg); kfree(sg); p = virt_to_page(adev->gart.ptr); + for (x = 0; x < (1UL << order); x++) + p[x].mapping = NULL; __free_pages(p, order); adev->gart.ptr = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index a1b15d0d6..84beeaa4d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -791,7 +791,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, default: break; } - if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug) + if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm) amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, args->operation); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index ef4cb9217..6ddc8e336 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -29,6 +29,7 @@ #include "amdgpu_rlc.h" #include "amdgpu_ras.h" #include "amdgpu_xcp.h" +#include "amdgpu_xgmi.h" /* delay 0.1 second to enable gfx off feature */ #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100) @@ -158,7 +159,7 @@ static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev) return amdgpu_compute_multipipe == 1; } - if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0)) return true; /* FIXME: spreading the queues across pipes causes perf regressions @@ -386,7 +387,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, #if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64) /* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */ - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0)) domain |= AMDGPU_GEM_DOMAIN_VRAM; #endif @@ -503,6 +504,9 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id) { struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; struct amdgpu_ring *kiq_ring = &kiq->ring; + struct amdgpu_hive_info *hive; + struct amdgpu_ras *ras; + int hive_ras_recovery = 0; int i, r = 0; int j; @@ -523,6 +527,23 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id) RESET_QUEUES, 0, 0); } + /** + * This is a workaround: only skip the kiq_ring test + * during RAS recovery in the suspend stage for gfx9.4.3 + */ + hive = amdgpu_get_xgmi_hive(adev); + if (hive) { + hive_ras_recovery = atomic_read(&hive->ras_recovery); + amdgpu_put_xgmi_hive(hive); + } + + ras = amdgpu_ras_get_context(adev); + if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) && + ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery)) { + spin_unlock(&kiq->ring_lock); + return 0; + } + if (kiq_ring->sched.ready && !adev->job_hang) r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); @@ -702,8 +723,15 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state) { - schedule_delayed_work(&adev->gfx.gfx_off_delay_work, + /* If going to s2idle, no need to wait */ + if (adev->in_s0ix) { + if (!amdgpu_dpm_set_powergating_by_smu(adev,
AMD_IP_BLOCK_TYPE_GFX, true)) + adev->gfx.gfx_off_state = true; + } else { + schedule_delayed_work(&adev->gfx.gfx_off_delay_work, delay); + } } } else { if (adev->gfx.gfx_off_req_count == 0) { @@ -910,12 +938,12 @@ void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev, func(adev, ras_error_status, i); } -uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) +uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id) { signed long r, cnt = 0; unsigned long flags; uint32_t seq, reg_val_offs = 0, value = 0; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; struct amdgpu_ring *ring = &kiq->ring; if (amdgpu_device_skip_hw_access(adev)) @@ -978,12 +1006,12 @@ failed_kiq_read: return ~0; } -void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) +void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id) { signed long r, cnt = 0; unsigned long flags; uint32_t seq; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; struct amdgpu_ring *ring = &kiq->ring; BUG_ON(!ring->funcs->emit_wreg); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 0ca95c4d4..f23bafec7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -43,10 +43,10 @@ #define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L #define AMDGPU_MAX_GC_INSTANCES 8 -#define KGD_MAX_QUEUES 128 +#define AMDGPU_MAX_QUEUES 128 -#define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES -#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES +#define AMDGPU_MAX_GFX_QUEUES AMDGPU_MAX_QUEUES +#define AMDGPU_MAX_COMPUTE_QUEUES AMDGPU_MAX_QUEUES enum amdgpu_gfx_pipe_priority { AMDGPU_GFX_PIPE_PRIO_NORMAL = AMDGPU_RING_PRIO_1, @@ -69,11 +69,6 @@ enum amdgpu_gfx_partition { #define NUM_XCC(x) hweight16(x) -enum amdgpu_pkg_type { - AMDGPU_PKG_TYPE_APU = 2, - AMDGPU_PKG_TYPE_UNKNOWN, -}; - enum amdgpu_gfx_ras_mem_id_type { AMDGPU_GFX_CP_MEM = 0, AMDGPU_GFX_GCEA_MEM, @@ -526,8 +521,8 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); -uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); -void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); +uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id); +void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id); int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev); void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index d78bd9732..55784a9f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -32,6 +32,7 @@ #include "amdgpu.h" #include "amdgpu_gmc.h" #include "amdgpu_ras.h" +#include "amdgpu_reset.h" #include "amdgpu_xgmi.h" #include @@ -180,6 +181,9 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); + if (!bo->ttm) + return AMDGPU_BO_INVALID_OFFSET; + if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached) return AMDGPU_BO_INVALID_OFFSET; @@ -263,12 +267,14 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc * * @adev: amdgpu device structure holding 
all necessary information * @mc: memory controller structure holding memory information + * @gart_placement: GART placement policy with respect to VRAM * * Function will try to place GART before or after VRAM. * If GART size is bigger than the space left then we adjust GART size. * Thus the function will never fail. */ -void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) +void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, + enum amdgpu_gart_placement gart_placement) { const uint64_t four_gb = 0x100000000ULL; u64 size_af, size_bf; @@ -286,11 +292,22 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) mc->gart_size = max(size_bf, size_af); } - if ((size_bf >= mc->gart_size && size_bf < size_af) || - (size_af < mc->gart_size)) - mc->gart_start = 0; - else + switch (gart_placement) { + case AMDGPU_GART_PLACEMENT_HIGH: mc->gart_start = max_mc_address - mc->gart_size + 1; + break; + case AMDGPU_GART_PLACEMENT_LOW: + mc->gart_start = 0; + break; + case AMDGPU_GART_PLACEMENT_BEST_FIT: + default: + if ((size_bf >= mc->gart_size && size_bf < size_af) || + (size_af < mc->gart_size)) + mc->gart_start = 0; + else + mc->gart_start = max_mc_address - mc->gart_size + 1; + break; + } mc->gart_start &= ~(four_gb - 1); mc->gart_end = mc->gart_start + mc->gart_size - 1; @@ -315,14 +332,6 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1); u64 size_af, size_bf; - if (amdgpu_sriov_vf(adev)) { - mc->agp_start = 0xffffffffffff; - mc->agp_end = 0x0; - mc->agp_size = 0; - - return; - } - if (mc->fb_start > mc->gart_start) { size_bf = (mc->fb_start & sixteen_gb_mask) - ALIGN(mc->gart_end + 1, sixteen_gb); @@ -346,6 +355,25 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) mc->agp_size >> 20, mc->agp_start, mc->agp_end); } +/** + * amdgpu_gmc_set_agp_default - Set the default AGP aperture value. + * @adev: amdgpu device structure holding all necessary information + * @mc: memory controller structure holding memory information + * + * To disable the AGP aperture, you need to set the start to a larger + * value than the end. This function sets the default value which + * can then be overridden using amdgpu_gmc_agp_location() if you want + * to enable the AGP aperture on a specific chip. + * + */ +void amdgpu_gmc_set_agp_default(struct amdgpu_device *adev, + struct amdgpu_gmc *mc) +{ + mc->agp_start = 0xffffffffffff; + mc->agp_end = 0; + mc->agp_size = 0; +} + /** * amdgpu_gmc_fault_key - get hash key from vm fault address and pasid * @@ -452,7 +480,10 @@ void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr, uint32_t hash; uint64_t tmp; - ih = adev->irq.retry_cam_enabled ? &adev->irq.ih_soft : &adev->irq.ih1; + if (adev->irq.retry_cam_enabled) + return; + + ih = &adev->irq.ih1; /* Get the WPTR of the last entry in IH ring */ last_wptr = amdgpu_ih_get_wptr(adev, ih); /* Order wptr with ring data. */
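/*
 * A user-space model of the gart_start selection introduced above:
 * AMDGPU_GART_PLACEMENT_HIGH pins GART to the top of the MC address space,
 * _LOW pins it to zero, and _BEST_FIT keeps the old smaller-hole heuristic.
 * The address-space numbers below are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

enum placement { BEST_FIT, HIGH, LOW };

static uint64_t pick_gart_start(enum placement p, uint64_t max_mc_address,
				uint64_t gart_size, uint64_t size_bf,
				uint64_t size_af)
{
	const uint64_t four_gb = 0x100000000ULL;
	uint64_t start;

	switch (p) {
	case HIGH:
		start = max_mc_address - gart_size + 1;
		break;
	case LOW:
		start = 0;
		break;
	case BEST_FIT:
	default:
		/* the pre-existing heuristic: prefer the hole that fits */
		if ((size_bf >= gart_size && size_bf < size_af) ||
		    size_af < gart_size)
			start = 0;
		else
			start = max_mc_address - gart_size + 1;
		break;
	}
	return start & ~(four_gb - 1);	/* 4GB alignment, as in the driver */
}

int main(void)
{
	uint64_t start = pick_gart_start(HIGH, 0xFFFFFFFFFFULL,
					 512ULL << 20, 1ULL << 30, 8ULL << 30);

	printf("gart_start = 0x%llx\n", (unsigned long long)start);
	return 0;
}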
@@ -549,13 +580,17 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev) /* reserve engine 5 for firmware */ if (adev->enable_mes) vm_inv_engs[i] &= ~(1 << 5); + /* reserve mmhub engine 3 for firmware */ + if (adev->enable_umsch_mm) + vm_inv_engs[i] &= ~(1 << 3); } for (i = 0; i < adev->num_rings; ++i) { ring = adev->rings[i]; vmhub = ring->vm_hub; - if (ring == &adev->mes.ring) + if (ring == &adev->mes.ring || + ring == &adev->umsch_mm.ring) continue; inv_eng = ffs(vm_inv_engs[vmhub]); @@ -575,6 +610,142 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev) return 0; } +void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, + uint32_t vmhub, uint32_t flush_type) +{ + struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; + struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; + struct dma_fence *fence; + struct amdgpu_job *job; + int r; + + if (!hub->sdma_invalidation_workaround || vmid || + !adev->mman.buffer_funcs_enabled || + !adev->ib_pool_ready || amdgpu_in_reset(adev) || + !ring->sched.ready) { + + /* + * A GPU reset should flush all TLBs anyway, so no need to do + * this while one is ongoing. + */ + if (!down_read_trylock(&adev->reset_domain->sem)) + return; + + if (adev->gmc.flush_tlb_needs_extra_type_2) + adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, + vmhub, 2); + + if (adev->gmc.flush_tlb_needs_extra_type_0 && flush_type == 2) + adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, + vmhub, 0); + + adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, vmhub, + flush_type); + up_read(&adev->reset_domain->sem); + return; + } + + /* The SDMA on Navi 1x has a bug which can theoretically result in memory + * corruption if an invalidation happens at the same time as a VA + * translation. Avoid this by doing the invalidation from the SDMA + * itself at least for GART. + */ + mutex_lock(&adev->mman.gtt_window_lock); + r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr, + AMDGPU_FENCE_OWNER_UNDEFINED, + 16 * 4, AMDGPU_IB_POOL_IMMEDIATE, + &job); + if (r) + goto error_alloc; + + job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo); + job->vm_needs_flush = true; + job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop; + amdgpu_ring_pad_ib(ring, &job->ibs[0]); + fence = amdgpu_job_submit(job); + mutex_unlock(&adev->mman.gtt_window_lock); + + dma_fence_wait(fence, false); + dma_fence_put(fence); + + return; + +error_alloc: + mutex_unlock(&adev->mman.gtt_window_lock); + dev_err(adev->dev, "Error flushing GPU TLB using the SDMA (%d)!\n", r); +} + +int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, + uint32_t flush_type, bool all_hub, + uint32_t inst) +{ + u32 usec_timeout = amdgpu_sriov_vf(adev) ?
SRIOV_USEC_TIMEOUT : + adev->usec_timeout; + struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst]; + unsigned int ndw; + signed long r; + uint32_t seq; + + if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready || + !down_read_trylock(&adev->reset_domain->sem)) { + + if (adev->gmc.flush_tlb_needs_extra_type_2) + adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid, + 2, all_hub, + inst); + + if (adev->gmc.flush_tlb_needs_extra_type_0 && flush_type == 2) + adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid, + 0, all_hub, + inst); + + adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid, + flush_type, all_hub, + inst); + return 0; + } + + /* 2 dwords flush + 8 dwords fence */ + ndw = kiq->pmf->invalidate_tlbs_size + 8; + + if (adev->gmc.flush_tlb_needs_extra_type_2) + ndw += kiq->pmf->invalidate_tlbs_size; + + if (adev->gmc.flush_tlb_needs_extra_type_0) + ndw += kiq->pmf->invalidate_tlbs_size; + + spin_lock(&adev->gfx.kiq[inst].ring_lock); + amdgpu_ring_alloc(ring, ndw); + if (adev->gmc.flush_tlb_needs_extra_type_2) + kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub); + + if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0) + kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub); + + kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub); + r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); + if (r) { + amdgpu_ring_undo(ring); + spin_unlock(&adev->gfx.kiq[inst].ring_lock); + goto error_unlock_reset; + } + + amdgpu_ring_commit(ring); + spin_unlock(&adev->gfx.kiq[inst].ring_lock); + r = amdgpu_fence_wait_polling(ring, seq, usec_timeout); + if (r < 1) { + dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); + r = -ETIME; + goto error_unlock_reset; + } + r = 0; + +error_unlock_reset: + up_read(&adev->reset_domain->sem); + return r; +} + /** * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ * @adev: amdgpu_device pointer @@ -584,7 +755,7 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev) */ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { /* RAVEN */ case IP_VERSION(9, 2, 2): case IP_VERSION(9, 1, 0): @@ -618,6 +789,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) /* YELLOW_CARP*/ case IP_VERSION(10, 3, 3): case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 5, 0): /* Don't enable it by default yet. */ if (amdgpu_tmz < 1) { @@ -648,7 +820,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) void amdgpu_gmc_noretry_set(struct amdgpu_device *adev) { struct amdgpu_gmc *gmc = &adev->gmc; - uint32_t gc_ver = adev->ip_versions[GC_HWIP][0]; + uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); bool noretry_default = (gc_ver == IP_VERSION(9, 0, 1) || gc_ver == IP_VERSION(9, 3, 0) || gc_ver == IP_VERSION(9, 4, 0) || @@ -657,7 +829,10 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev) gc_ver == IP_VERSION(9, 4, 3) || gc_ver >= IP_VERSION(10, 3, 0)); - gmc->noretry = (amdgpu_noretry == -1) ? noretry_default : amdgpu_noretry; + if (!amdgpu_sriov_xnack_support(adev)) + gmc->noretry = 1; + else + gmc->noretry = (amdgpu_noretry == -1) ? 
noretry_default : amdgpu_noretry; } void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type, @@ -721,12 +896,6 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev) case CHIP_RENOIR: adev->mman.keep_stolen_vga_memory = true; break; - case CHIP_YELLOW_CARP: - if (amdgpu_discovery == 0) { - adev->mman.stolen_reserved_offset = 0x1ffb0000; - adev->mman.stolen_reserved_size = 64 * PAGE_SIZE; - } - break; default: adev->mman.keep_stolen_vga_memory = false; break; @@ -876,21 +1045,28 @@ int amdgpu_gmc_vram_checking(struct amdgpu_device *adev) * seconds, so here, we just pick up three parts for emulation. */ ret = memcmp(vram_ptr, cptr, 10); - if (ret) - return ret; + if (ret) { + ret = -EIO; + goto release_buffer; + } ret = memcmp(vram_ptr + (size / 2), cptr, 10); - if (ret) - return ret; + if (ret) { + ret = -EIO; + goto release_buffer; + } ret = memcmp(vram_ptr + size - 10, cptr, 10); - if (ret) - return ret; + if (ret) { + ret = -EIO; + goto release_buffer; + } +release_buffer: amdgpu_bo_free_kernel(&vram_bo, &vram_gpu, &vram_ptr); - return 0; + return ret; } static ssize_t current_memory_partition_show( diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index fdc25cd55..e699d1ca8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -117,6 +117,8 @@ struct amdgpu_vmhub { uint32_t vm_contexts_disable; + bool sdma_invalidation_workaround; + const struct amdgpu_vmhub_funcs *vmhub_funcs; }; @@ -128,9 +130,9 @@ struct amdgpu_gmc_funcs { void (*flush_gpu_tlb)(struct amdgpu_device *adev, uint32_t vmid, uint32_t vmhub, uint32_t flush_type); /* flush the vm tlb via pasid */ - int (*flush_gpu_tlb_pasid)(struct amdgpu_device *adev, uint16_t pasid, - uint32_t flush_type, bool all_hub, - uint32_t inst); + void (*flush_gpu_tlb_pasid)(struct amdgpu_device *adev, uint16_t pasid, + uint32_t flush_type, bool all_hub, + uint32_t inst); /* flush the vm tlb via ring */ uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr); @@ -197,6 +199,12 @@ struct amdgpu_mem_partition_info { #define INVALID_PFN -1 +enum amdgpu_gart_placement { + AMDGPU_GART_PLACEMENT_BEST_FIT = 0, + AMDGPU_GART_PLACEMENT_HIGH, + AMDGPU_GART_PLACEMENT_LOW, +}; + struct amdgpu_gmc { /* FB's physical address in MMIO space (for CPU to * map FB). 
This is different compared to the agp/ @@ -333,12 +341,12 @@ struct amdgpu_gmc { u64 MC_VM_MX_L1_TLB_CNTL; u64 noretry_flags; + + bool flush_tlb_needs_extra_type_0; + bool flush_tlb_needs_extra_type_2; + bool flush_pasid_uses_kiq; }; -#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type))) -#define amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, type, allhub, inst) \ - ((adev)->gmc.gmc_funcs->flush_gpu_tlb_pasid \ - ((adev), (pasid), (type), (allhub), (inst))) #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr)) #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid)) #define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags)) @@ -389,9 +397,12 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, u64 base); void amdgpu_gmc_gart_location(struct amdgpu_device *adev, - struct amdgpu_gmc *mc); + struct amdgpu_gmc *mc, + enum amdgpu_gart_placement gart_placement); void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc); +void amdgpu_gmc_set_agp_default(struct amdgpu_device *adev, + struct amdgpu_gmc *mc); bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, uint64_t addr, uint16_t pasid, uint64_t timestamp); @@ -401,6 +412,11 @@ int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev); int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev); void amdgpu_gmc_ras_fini(struct amdgpu_device *adev); int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev); +void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, + uint32_t vmhub, uint32_t flush_type); +int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, + uint32_t flush_type, bool all_hub, + uint32_t inst); extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev); extern void amdgpu_gmc_noretry_set(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index ff1ea9929..ddd0891da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -188,7 +188,6 @@ static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id, /** * amdgpu_vmid_grab_idle - grab idle VMID * - * @vm: vm to allocate id for * @ring: ring we want to submit job to * @idle: resulting idle VMID * @fence: fence to wait for if no id could be grabbed @@ -196,8 +195,7 @@ static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id, * Try to find an idle VMID, if none is idle add a fence to wait to the sync * object. Returns -ENOMEM when we are out of memory. 
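/*
 * A compact model of how the flush_tlb_needs_extra_type_{0,2} flags declared
 * above turn one requested flush into the sequence the driver actually
 * issues (compare amdgpu_gmc_flush_gpu_tlb_pasid() earlier in this patch).
 * Prints the sequence instead of touching hardware.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static void hw_flush(uint32_t type)
{
	printf("flush_gpu_tlb_pasid(flush_type=%u)\n", type);
}

static void flush_pasid(uint32_t flush_type, bool extra_type_0,
			bool extra_type_2)
{
	if (extra_type_2)
		hw_flush(2);		/* extra heavyweight flush first */
	if (extra_type_0 && flush_type == 2)
		hw_flush(0);		/* extra legacy flush for type-2 requests */
	hw_flush(flush_type);		/* the flush that was actually asked for */
}

int main(void)
{
	/* e.g. a part that needs the extra type-0 flush */
	flush_pasid(2, true, false);
	return 0;
}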
*/ -static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, - struct amdgpu_ring *ring, +static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring, struct amdgpu_vmid **idle, struct dma_fence **fence) { @@ -405,7 +403,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, int r = 0; mutex_lock(&id_mgr->lock); - r = amdgpu_vmid_grab_idle(vm, ring, &idle, fence); + r = amdgpu_vmid_grab_idle(ring, &idle, fence); if (r || !idle) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index fa6d0adce..7e6d09730 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -270,29 +270,29 @@ static void amdgpu_restore_msix(struct amdgpu_device *adev) */ int amdgpu_irq_init(struct amdgpu_device *adev) { - int r = 0; - unsigned int irq; + unsigned int irq, flags; + int r; spin_lock_init(&adev->irq.lock); /* Enable MSI if not disabled by module parameter */ adev->irq.msi_enabled = false; + if (!amdgpu_msi_ok(adev)) + flags = PCI_IRQ_LEGACY; + else + flags = PCI_IRQ_ALL_TYPES; + + /* we only need one vector */ + r = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags); + if (r < 0) { + dev_err(adev->dev, "Failed to alloc msi vectors\n"); + return r; + } + if (amdgpu_msi_ok(adev)) { - int nvec = pci_msix_vec_count(adev->pdev); - unsigned int flags; - - if (nvec <= 0) - flags = PCI_IRQ_MSI; - else - flags = PCI_IRQ_MSI | PCI_IRQ_MSIX; - - /* we only need one vector */ - nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags); - if (nvec > 0) { - adev->irq.msi_enabled = true; - dev_dbg(adev->dev, "using MSI/MSI-X.\n"); - } + adev->irq.msi_enabled = true; + dev_dbg(adev->dev, "using MSI/MSI-X.\n"); } INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1); @@ -302,22 +302,29 @@ int amdgpu_irq_init(struct amdgpu_device *adev) /* Use vector 0 for MSI-X. */ r = pci_irq_vector(adev->pdev, 0); if (r < 0) - return r; + goto free_vectors; irq = r; /* PCI devices require shared interrupts. 
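/*
 * The reworked amdgpu_irq_init() above allocates its MSI/MSI-X vector first
 * and unwinds through a free_vectors label on failure. A stand-alone model
 * of that goto-cleanup shape, with stubs standing in for the PCI/IRQ calls.
 */
#include <stdio.h>
#include <stdbool.h>

static bool msi_enabled;

static int alloc_vectors(void)    { msi_enabled = true; return 0; }
static void release_vectors(void) { msi_enabled = false; }
static int request_line(void)     { return -1; /* simulate request_irq() failing */ }

static int irq_init_model(void)
{
	int r = alloc_vectors();

	if (r < 0)
		return r;

	r = request_line();
	if (r)
		goto free_vectors;	/* same unwind label as the new code */

	return 0;

free_vectors:
	if (msi_enabled)
		release_vectors();
	return r;
}

int main(void)
{
	int r = irq_init_model();

	printf("irq_init = %d, msi_enabled = %d\n", r, msi_enabled);
	return 0;
}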
*/ r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name, adev_to_drm(adev)); if (r) - return r; + goto free_vectors; + adev->irq.installed = true; adev->irq.irq = irq; adev_to_drm(adev)->max_vblank_count = 0x00ffffff; DRM_DEBUG("amdgpu: irq initialized.\n"); return 0; -} +free_vectors: + if (adev->irq.msi_enabled) + pci_free_irq_vectors(adev->pdev); + + adev->irq.msi_enabled = false; + return r; +} void amdgpu_irq_fini_hw(struct amdgpu_device *adev) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 78476bc75..1f3571985 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -325,8 +325,8 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched) int i; /* Signal all jobs not yet scheduled */ - for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { - struct drm_sched_rq *rq = &sched->sched_rq[i]; + for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + struct drm_sched_rq *rq = sched->sched_rq[i]; spin_lock(&rq->lock); list_for_each_entry(s_entity, &rq->entities, list) { while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 58dab4f73..598867919 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -200,6 +200,44 @@ out: return r; } +static enum amd_ip_block_type + amdgpu_ip_get_block_type(struct amdgpu_device *adev, uint32_t ip) +{ + enum amd_ip_block_type type; + + switch (ip) { + case AMDGPU_HW_IP_GFX: + type = AMD_IP_BLOCK_TYPE_GFX; + break; + case AMDGPU_HW_IP_COMPUTE: + type = AMD_IP_BLOCK_TYPE_GFX; + break; + case AMDGPU_HW_IP_DMA: + type = AMD_IP_BLOCK_TYPE_SDMA; + break; + case AMDGPU_HW_IP_UVD: + case AMDGPU_HW_IP_UVD_ENC: + type = AMD_IP_BLOCK_TYPE_UVD; + break; + case AMDGPU_HW_IP_VCE: + type = AMD_IP_BLOCK_TYPE_VCE; + break; + case AMDGPU_HW_IP_VCN_DEC: + case AMDGPU_HW_IP_VCN_ENC: + type = AMD_IP_BLOCK_TYPE_VCN; + break; + case AMDGPU_HW_IP_VCN_JPEG: + type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ? 
+ AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN; + break; + default: + type = AMD_IP_BLOCK_TYPE_NUM; + break; + } + + return type; +} + static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, struct drm_amdgpu_query_fw *query_fw, struct amdgpu_device *adev) @@ -352,6 +390,10 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, fw_info->ver = adev->gfx.imu_fw_version; fw_info->feature = 0; break; + case AMDGPU_INFO_FW_VPE: + fw_info->ver = adev->vpe.fw_version; + fw_info->feature = adev->vpe.feature_version; + break; default: return -EINVAL; } @@ -405,7 +447,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, if (adev->uvd.inst[i].ring.sched.ready) ++num_rings; } - ib_start_alignment = 64; + ib_start_alignment = 256; ib_size_alignment = 64; break; case AMDGPU_HW_IP_VCE: @@ -413,8 +455,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, for (i = 0; i < adev->vce.num_rings; i++) if (adev->vce.ring[i].sched.ready) ++num_rings; - ib_start_alignment = 4; - ib_size_alignment = 1; + ib_start_alignment = 256; + ib_size_alignment = 4; break; case AMDGPU_HW_IP_UVD_ENC: type = AMD_IP_BLOCK_TYPE_UVD; @@ -426,8 +468,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, if (adev->uvd.inst[i].ring_enc[j].sched.ready) ++num_rings; } - ib_start_alignment = 64; - ib_size_alignment = 64; + ib_start_alignment = 256; + ib_size_alignment = 4; break; case AMDGPU_HW_IP_VCN_DEC: type = AMD_IP_BLOCK_TYPE_VCN; @@ -438,8 +480,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, if (adev->vcn.inst[i].ring_dec.sched.ready) ++num_rings; } - ib_start_alignment = 16; - ib_size_alignment = 16; + ib_start_alignment = 256; + ib_size_alignment = 64; break; case AMDGPU_HW_IP_VCN_ENC: type = AMD_IP_BLOCK_TYPE_VCN; @@ -451,8 +493,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, if (adev->vcn.inst[i].ring_enc[j].sched.ready) ++num_rings; } - ib_start_alignment = 64; - ib_size_alignment = 1; + ib_start_alignment = 256; + ib_size_alignment = 4; break; case AMDGPU_HW_IP_VCN_JPEG: type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ? 
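/*
 * The alignments above are contracts with user space (interface bump
 * 3.56.0): an IB for these engines must start on an ib_start_alignment
 * boundary and its size must be a multiple of ib_size_alignment. A tiny
 * model of the rounding a submitter would do; the sample numbers are
 * arbitrary.
 */
#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a) (((x) + (uint64_t)(a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t ib_va = 0x1234;	/* raw IB GPU virtual address */
	uint64_t ib_size = 37;		/* raw IB size */
	uint32_t start_align = 256;	/* ib_start_alignment from HW_IP_INFO */
	uint32_t size_align = 4;	/* ib_size_alignment from HW_IP_INFO */

	printf("start 0x%llx -> 0x%llx\n", (unsigned long long)ib_va,
	       (unsigned long long)ALIGN_UP(ib_va, start_align));
	printf("size  %llu -> %llu\n", (unsigned long long)ib_size,
	       (unsigned long long)ALIGN_UP(ib_size, size_align));
	return 0;
}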
@@ -466,8 +508,15 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, if (adev->jpeg.inst[i].ring_dec[j].sched.ready) ++num_rings; } - ib_start_alignment = 16; - ib_size_alignment = 16; + ib_start_alignment = 256; + ib_size_alignment = 64; + break; + case AMDGPU_HW_IP_VPE: + type = AMD_IP_BLOCK_TYPE_VPE; + if (adev->vpe.ring.sched.ready) + ++num_rings; + ib_start_alignment = 256; + ib_size_alignment = 4; break; default: return -EINVAL; @@ -490,18 +539,26 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, if (adev->asic_type >= CHIP_VEGA10) { switch (type) { case AMD_IP_BLOCK_TYPE_GFX: - result->ip_discovery_version = adev->ip_versions[GC_HWIP][0]; + result->ip_discovery_version = + IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, GC_HWIP, 0)); break; case AMD_IP_BLOCK_TYPE_SDMA: - result->ip_discovery_version = adev->ip_versions[SDMA0_HWIP][0]; + result->ip_discovery_version = + IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, SDMA0_HWIP, 0)); break; case AMD_IP_BLOCK_TYPE_UVD: case AMD_IP_BLOCK_TYPE_VCN: case AMD_IP_BLOCK_TYPE_JPEG: - result->ip_discovery_version = adev->ip_versions[UVD_HWIP][0]; + result->ip_discovery_version = + IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, UVD_HWIP, 0)); break; case AMD_IP_BLOCK_TYPE_VCE: - result->ip_discovery_version = adev->ip_versions[VCE_HWIP][0]; + result->ip_discovery_version = + IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, VCE_HWIP, 0)); + break; + case AMD_IP_BLOCK_TYPE_VPE: + result->ip_discovery_version = + IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, VPE_HWIP, 0)); break; default: result->ip_discovery_version = 0; @@ -538,11 +595,16 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) struct drm_amdgpu_info *info = data; struct amdgpu_mode_info *minfo = &adev->mode_info; void __user *out = (void __user *)(uintptr_t)info->return_pointer; + struct amdgpu_fpriv *fpriv; + struct amdgpu_ip_block *ip_block; + enum amd_ip_block_type type; + struct amdgpu_xcp *xcp; + u32 count, inst_mask; uint32_t size = info->return_size; struct drm_crtc *crtc; uint32_t ui32 = 0; uint64_t ui64 = 0; - int i, found; + int i, found, ret; int ui32_size = sizeof(ui32); if (!info->return_size || !info->return_pointer) @@ -570,7 +632,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0; case AMDGPU_INFO_HW_IP_INFO: { struct drm_amdgpu_info_hw_ip ip = {}; - int ret; ret = amdgpu_hw_ip_info(adev, info, &ip); if (ret) @@ -580,46 +641,66 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return ret ? 
-EFAULT : 0; } case AMDGPU_INFO_HW_IP_COUNT: { - enum amd_ip_block_type type; - uint32_t count = 0; + fpriv = (struct amdgpu_fpriv *)filp->driver_priv; + type = amdgpu_ip_get_block_type(adev, info->query_hw_ip.type); + ip_block = amdgpu_device_ip_get_ip_block(adev, type); - switch (info->query_hw_ip.type) { - case AMDGPU_HW_IP_GFX: - type = AMD_IP_BLOCK_TYPE_GFX; - break; - case AMDGPU_HW_IP_COMPUTE: - type = AMD_IP_BLOCK_TYPE_GFX; - break; - case AMDGPU_HW_IP_DMA: - type = AMD_IP_BLOCK_TYPE_SDMA; - break; - case AMDGPU_HW_IP_UVD: - type = AMD_IP_BLOCK_TYPE_UVD; + if (!ip_block || !ip_block->status.valid) + return -EINVAL; + + if (adev->xcp_mgr && adev->xcp_mgr->num_xcps > 0 && + fpriv->xcp_id >= 0 && fpriv->xcp_id < adev->xcp_mgr->num_xcps) { + xcp = &adev->xcp_mgr->xcp[fpriv->xcp_id]; + switch (type) { + case AMD_IP_BLOCK_TYPE_GFX: + ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); + count = hweight32(inst_mask); + break; + case AMD_IP_BLOCK_TYPE_SDMA: + ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_SDMA, &inst_mask); + count = hweight32(inst_mask); + break; + case AMD_IP_BLOCK_TYPE_JPEG: + ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); + count = hweight32(inst_mask) * adev->jpeg.num_jpeg_rings; + break; + case AMD_IP_BLOCK_TYPE_VCN: + ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); + count = hweight32(inst_mask); + break; + default: + return -EINVAL; + } + if (ret) + return ret; + return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0; + } + + switch (type) { + case AMD_IP_BLOCK_TYPE_GFX: + case AMD_IP_BLOCK_TYPE_VCE: + count = 1; break; - case AMDGPU_HW_IP_VCE: - type = AMD_IP_BLOCK_TYPE_VCE; + case AMD_IP_BLOCK_TYPE_SDMA: + count = adev->sdma.num_instances; break; - case AMDGPU_HW_IP_UVD_ENC: - type = AMD_IP_BLOCK_TYPE_UVD; + case AMD_IP_BLOCK_TYPE_JPEG: + count = adev->jpeg.num_jpeg_inst * adev->jpeg.num_jpeg_rings; break; - case AMDGPU_HW_IP_VCN_DEC: - case AMDGPU_HW_IP_VCN_ENC: - type = AMD_IP_BLOCK_TYPE_VCN; + case AMD_IP_BLOCK_TYPE_VCN: + count = adev->vcn.num_vcn_inst; break; - case AMDGPU_HW_IP_VCN_JPEG: - type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ? - AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN; + case AMD_IP_BLOCK_TYPE_UVD: + count = adev->uvd.num_uvd_inst; break; + /* For all other IP block types not listed in the switch statement + * the ip status is valid here and the instance count is one. + */ default: - return -EINVAL; + count = 1; + break; } - for (i = 0; i < adev->num_ip_blocks; i++) - if (adev->ip_blocks[i].version->type == type && - adev->ip_blocks[i].status.valid && - count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT) - count++; - return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0; } case AMDGPU_INFO_TIMESTAMP: @@ -627,7 +708,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_FW_VERSION: { struct drm_amdgpu_info_firmware fw_info; - int ret; /* We only support one instance of each IP block right now. 
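
In the partition-aware branch above, the queue count is the population count of the instance mask an XCP owns, optionally scaled by per-instance ring counts. A standalone restatement of the idiom; hweight32 is modeled with the GCC builtin, and the mask values are invented for illustration:

#include <stdio.h>
#include <stdint.h>

static unsigned int hweight32(uint32_t w) /* userspace stand-in */
{
	return (unsigned int)__builtin_popcount(w);
}

int main(void)
{
	uint32_t inst_mask = 0x0c;       /* hypothetical: instances 2 and 3 */
	unsigned int num_jpeg_rings = 2; /* hypothetical rings per instance */

	printf("VCN queues in partition:  %u\n", hweight32(inst_mask));
	printf("JPEG queues in partition: %u\n",
	       hweight32(inst_mask) * num_jpeg_rings);
	return 0;
}
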
*/ if (info->query_fw.ip_instance != 0) @@ -772,7 +852,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) struct drm_amdgpu_info_device *dev_info; uint64_t vm_size; uint32_t pcie_gen_mask; - int ret; dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL); if (!dev_info) @@ -1178,6 +1257,26 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return copy_to_user(out, max_ibs, min((size_t)size, sizeof(max_ibs))) ? -EFAULT : 0; } + case AMDGPU_INFO_GPUVM_FAULT: { + struct amdgpu_fpriv *fpriv = filp->driver_priv; + struct amdgpu_vm *vm = &fpriv->vm; + struct drm_amdgpu_info_gpuvm_fault gpuvm_fault; + unsigned long flags; + + if (!vm) + return -EINVAL; + + memset(&gpuvm_fault, 0, sizeof(gpuvm_fault)); + + xa_lock_irqsave(&adev->vm_manager.pasids, flags); + gpuvm_fault.addr = vm->fault_info.addr; + gpuvm_fault.status = vm->fault_info.status; + gpuvm_fault.vmhub = vm->fault_info.vmhub; + xa_unlock_irqrestore(&adev->vm_manager.pasids, flags); + + return copy_to_user(out, &gpuvm_fault, + min((size_t)size, sizeof(gpuvm_fault))) ? -EFAULT : 0; + } default: DRM_DEBUG_KMS("Invalid request %d\n", info->query); return -EINVAL; @@ -1734,6 +1833,14 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused) seq_printf(m, "MES feature version: %u, firmware version: 0x%08x\n", fw_info.feature, fw_info.ver); + /* VPE */ + query_fw.fw_type = AMDGPU_INFO_FW_VPE; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "VPE feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + seq_printf(m, "VBIOS version: %s\n", ctx->vbios_pn); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c index 8d9ff9e15..061d88f44 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -142,3 +142,355 @@ int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev) return 0; } + +void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set) +{ + if (!mca_set) + return; + + memset(mca_set, 0, sizeof(*mca_set)); + INIT_LIST_HEAD(&mca_set->list); +} + +int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry) +{ + struct mca_bank_node *node; + + if (!entry) + return -EINVAL; + + node = kvzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + memcpy(&node->entry, entry, sizeof(*entry)); + + INIT_LIST_HEAD(&node->node); + list_add_tail(&node->node, &mca_set->list); + + mca_set->nr_entries++; + + return 0; +} + +void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set) +{ + struct mca_bank_node *node, *tmp; + + list_for_each_entry_safe(node, tmp, &mca_set->list, node) { + list_del(&node->node); + kvfree(node); + } +} + +void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs) +{ + struct amdgpu_mca *mca = &adev->mca; + + mca->mca_funcs = mca_funcs; +} + +int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) +{ + const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; + + if (mca_funcs && mca_funcs->mca_set_debug_mode) + return mca_funcs->mca_set_debug_mode(adev, enable); + + return -EOPNOTSUPP; +} + +static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry) +{ + dev_info(adev->dev, "[Hardware error] Accelerator Check Architecture events logged\n"); + dev_info(adev->dev, "[Hardware error] aca 
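
The new AMDGPU_INFO_GPUVM_FAULT query above snapshots the last VM fault recorded for the caller's VM under the pasid xarray lock. A sketch of the matching user-space call; the field names come from the hunk, while the field types and the device path are assumptions:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

int main(void)
{
	struct drm_amdgpu_info_gpuvm_fault fault = {0};
	struct drm_amdgpu_info req = {0};
	int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed node */

	if (fd < 0)
		return 1;

	req.return_pointer = (unsigned long)&fault;
	req.return_size = sizeof(fault);
	req.query = AMDGPU_INFO_GPUVM_FAULT;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req) == 0)
		printf("last fault: addr 0x%llx status 0x%x vmhub 0x%x\n",
		       (unsigned long long)fault.addr,
		       (unsigned int)fault.status,
		       (unsigned int)fault.vmhub);

	close(fd);
	return 0;
}
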
entry[%02d].STATUS=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_STATUS]); + dev_info(adev->dev, "[Hardware error] aca entry[%02d].ADDR=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_ADDR]); + dev_info(adev->dev, "[Hardware error] aca entry[%02d].MISC0=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_MISC0]); + dev_info(adev->dev, "[Hardware error] aca entry[%02d].IPID=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_IPID]); + dev_info(adev->dev, "[Hardware error] aca entry[%02d].SYND=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_SYND]); +} + +int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data) +{ + struct amdgpu_smuio_mcm_config_info mcm_info; + struct mca_bank_set mca_set; + struct mca_bank_node *node; + struct mca_bank_entry *entry; + uint32_t count; + int ret, i = 0; + + amdgpu_mca_bank_set_init(&mca_set); + + ret = amdgpu_mca_smu_get_mca_set(adev, blk, type, &mca_set); + if (ret) + goto out_mca_release; + + list_for_each_entry(node, &mca_set.list, node) { + entry = &node->entry; + + amdgpu_mca_smu_mca_bank_dump(adev, i++, entry); + + count = 0; + ret = amdgpu_mca_smu_parse_mca_error_count(adev, blk, type, entry, &count); + if (ret) + goto out_mca_release; + + if (!count) + continue; + + mcm_info.socket_id = entry->info.socket_id; + mcm_info.die_id = entry->info.aid; + + if (type == AMDGPU_MCA_ERROR_TYPE_UE) + amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, (uint64_t)count); + else + amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, (uint64_t)count); + } + +out_mca_release: + amdgpu_mca_bank_set_release(&mca_set); + + return ret; +} + + +int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count) +{ + const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; + + if (!count) + return -EINVAL; + + if (mca_funcs && mca_funcs->mca_get_valid_mca_count) + return mca_funcs->mca_get_valid_mca_count(adev, type, count); + + return -EOPNOTSUPP; +} + +int amdgpu_mca_smu_get_mca_set_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, + enum amdgpu_mca_error_type type, uint32_t *total) +{ + const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; + struct mca_bank_set mca_set; + struct mca_bank_node *node; + struct mca_bank_entry *entry; + uint32_t count; + int ret; + + if (!total) + return -EINVAL; + + if (!mca_funcs) + return -EOPNOTSUPP; + + if (!mca_funcs->mca_get_ras_mca_set || !mca_funcs->mca_get_valid_mca_count) + return -EOPNOTSUPP; + + amdgpu_mca_bank_set_init(&mca_set); + + ret = mca_funcs->mca_get_ras_mca_set(adev, blk, type, &mca_set); + if (ret) + goto err_mca_set_release; + + *total = 0; + list_for_each_entry(node, &mca_set.list, node) { + entry = &node->entry; + + count = 0; + ret = mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, &count); + if (ret) + goto err_mca_set_release; + + *total += count; + } + +err_mca_set_release: + amdgpu_mca_bank_set_release(&mca_set); + + return ret; +} + +int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, + enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count) +{ + const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; + if (!count || !entry) + return -EINVAL; + + if (!mca_funcs || !mca_funcs->mca_parse_mca_error_count) + return -EOPNOTSUPP; + + + return mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, count); +} + +int amdgpu_mca_smu_get_mca_set(struct 
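
The bank-set helpers above follow a collect-then-consume pattern: initialize an empty set, let the SMU backend append one node per MCA bank, walk the list, then free every node. A simplified standalone model of that lifecycle; the kernel version uses list_head and kvzalloc, which are replaced here so the sketch builds on its own:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct mca_bank_entry { int idx; uint64_t status; };

struct mca_bank_node {
	struct mca_bank_entry entry;
	struct mca_bank_node *next;
};

struct mca_bank_set {
	int nr_entries;
	struct mca_bank_node *head;
};

static void bank_set_init(struct mca_bank_set *set)
{
	memset(set, 0, sizeof(*set));
}

static int bank_set_add_entry(struct mca_bank_set *set,
			      const struct mca_bank_entry *entry)
{
	struct mca_bank_node *node = calloc(1, sizeof(*node));

	if (!node)
		return -1;

	node->entry = *entry;
	node->next = set->head; /* kernel version appends with list_add_tail */
	set->head = node;
	set->nr_entries++;
	return 0;
}

static void bank_set_release(struct mca_bank_set *set)
{
	struct mca_bank_node *node = set->head, *tmp;

	while (node) {
		tmp = node->next;
		free(node);
		node = tmp;
	}
	set->head = NULL;
	set->nr_entries = 0;
}

int main(void)
{
	struct mca_bank_entry e = { .idx = 0, .status = 0xb000000000000137ULL };
	struct mca_bank_set set;

	bank_set_init(&set);
	bank_set_add_entry(&set, &e);

	for (struct mca_bank_node *n = set.head; n; n = n->next)
		printf("entry[%02d].STATUS=0x%016llx\n", n->entry.idx,
		       (unsigned long long)n->entry.status);

	bank_set_release(&set);
	return 0;
}
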
amdgpu_device *adev, enum amdgpu_ras_block blk, + enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set) +{ + const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; + + if (!mca_set) + return -EINVAL; + + if (!mca_funcs || !mca_funcs->mca_get_ras_mca_set) + return -EOPNOTSUPP; + + WARN_ON(!list_empty(&mca_set->list)); + + return mca_funcs->mca_get_ras_mca_set(adev, blk, type, mca_set); +} + +int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, + int idx, struct mca_bank_entry *entry) +{ + const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; + int count; + + if (!mca_funcs || !mca_funcs->mca_get_mca_entry) + return -EOPNOTSUPP; + + switch (type) { + case AMDGPU_MCA_ERROR_TYPE_UE: + count = mca_funcs->max_ue_count; + break; + case AMDGPU_MCA_ERROR_TYPE_CE: + count = mca_funcs->max_ce_count; + break; + default: + return -EINVAL; + } + + if (idx >= count) + return -EINVAL; + + return mca_funcs->mca_get_mca_entry(adev, type, idx, entry); +} + +#if defined(CONFIG_DEBUG_FS) +static int amdgpu_mca_smu_debug_mode_set(void *data, u64 val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + int ret; + + ret = amdgpu_mca_smu_set_debug_mode(adev, val ? true : false); + if (ret) + return ret; + + dev_info(adev->dev, "amdgpu set smu mca debug mode %s success\n", val ? "on" : "off"); + + return 0; +} + +static void mca_dump_entry(struct seq_file *m, struct mca_bank_entry *entry) +{ + int i, idx = entry->idx; + int reg_idx_array[] = { + MCA_REG_IDX_STATUS, + MCA_REG_IDX_ADDR, + MCA_REG_IDX_MISC0, + MCA_REG_IDX_IPID, + MCA_REG_IDX_SYND, + }; + + seq_printf(m, "mca entry[%d].type: %s\n", idx, entry->type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE"); + seq_printf(m, "mca entry[%d].ip: %d\n", idx, entry->ip); + seq_printf(m, "mca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n", + idx, entry->info.socket_id, entry->info.aid, entry->info.hwid, entry->info.mcatype); + + for (i = 0; i < ARRAY_SIZE(reg_idx_array); i++) + seq_printf(m, "mca entry[%d].regs[%d]: 0x%016llx\n", idx, reg_idx_array[i], entry->regs[reg_idx_array[i]]); +} + +static int mca_dump_show(struct seq_file *m, enum amdgpu_mca_error_type type) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct mca_bank_entry *entry; + uint32_t count = 0; + int i, ret; + + ret = amdgpu_mca_smu_get_valid_mca_count(adev, type, &count); + if (ret) + return ret; + + seq_printf(m, "amdgpu smu %s valid mca count: %d\n", + type == AMDGPU_MCA_ERROR_TYPE_UE ? 
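
amdgpu_mca_smu_debug_mode_set above is wired to a write-only debugfs attribute, so flipping MCA debug mode from user space is a single write. A minimal example; the location under /sys/kernel/debug/dri/0/ras/ is an assumption, and per the init code below the node only exists on MP1 v13.0.6 parts:

#include <stdio.h>

int main(void)
{
	/* Assumed path; requires root and CONFIG_DEBUG_FS. */
	FILE *f = fopen("/sys/kernel/debug/dri/0/ras/mca_debug_mode", "w");

	if (!f)
		return 1;

	fputs("1\n", f); /* non-zero enables MCA debug mode */
	fclose(f);
	return 0;
}
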
"UE" : "CE", count); + + if (!count) + return 0; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + for (i = 0; i < count; i++) { + memset(entry, 0, sizeof(*entry)); + + ret = amdgpu_mca_smu_get_mca_entry(adev, type, i, entry); + if (ret) + goto err_free_entry; + + mca_dump_entry(m, entry); + } + +err_free_entry: + kfree(entry); + + return ret; +} + +static int mca_dump_ce_show(struct seq_file *m, void *unused) +{ + return mca_dump_show(m, AMDGPU_MCA_ERROR_TYPE_CE); +} + +static int mca_dump_ce_open(struct inode *inode, struct file *file) +{ + return single_open(file, mca_dump_ce_show, inode->i_private); +} + +static const struct file_operations mca_ce_dump_debug_fops = { + .owner = THIS_MODULE, + .open = mca_dump_ce_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int mca_dump_ue_show(struct seq_file *m, void *unused) +{ + return mca_dump_show(m, AMDGPU_MCA_ERROR_TYPE_UE); +} + +static int mca_dump_ue_open(struct inode *inode, struct file *file) +{ + return single_open(file, mca_dump_ue_show, inode->i_private); +} + +static const struct file_operations mca_ue_dump_debug_fops = { + .owner = THIS_MODULE, + .open = mca_dump_ue_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +DEFINE_DEBUGFS_ATTRIBUTE(mca_debug_mode_fops, NULL, amdgpu_mca_smu_debug_mode_set, "%llu\n"); +#endif + +void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root) +{ +#if defined(CONFIG_DEBUG_FS) + if (!root || adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 6)) + return; + + debugfs_create_file("mca_debug_mode", 0200, root, adev, &mca_debug_mode_fops); + debugfs_create_file("mca_ue_dump", 0400, root, adev, &mca_ue_dump_debug_fops); + debugfs_create_file("mca_ce_dump", 0400, root, adev, &mca_ce_dump_debug_fops); +#endif +} + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h index 997a073e2..e51e8918e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h @@ -21,6 +21,50 @@ #ifndef __AMDGPU_MCA_H__ #define __AMDGPU_MCA_H__ +#include "amdgpu_ras.h" + +#define MCA_MAX_REGS_COUNT (16) + +#define MCA_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> l) +#define MCA_REG__STATUS__VAL(x) MCA_REG_FIELD(x, 63, 63) +#define MCA_REG__STATUS__OVERFLOW(x) MCA_REG_FIELD(x, 62, 62) +#define MCA_REG__STATUS__UC(x) MCA_REG_FIELD(x, 61, 61) +#define MCA_REG__STATUS__EN(x) MCA_REG_FIELD(x, 60, 60) +#define MCA_REG__STATUS__MISCV(x) MCA_REG_FIELD(x, 59, 59) +#define MCA_REG__STATUS__ADDRV(x) MCA_REG_FIELD(x, 58, 58) +#define MCA_REG__STATUS__PCC(x) MCA_REG_FIELD(x, 57, 57) +#define MCA_REG__STATUS__ERRCOREIDVAL(x) MCA_REG_FIELD(x, 56, 56) +#define MCA_REG__STATUS__TCC(x) MCA_REG_FIELD(x, 55, 55) +#define MCA_REG__STATUS__SYNDV(x) MCA_REG_FIELD(x, 53, 53) +#define MCA_REG__STATUS__CECC(x) MCA_REG_FIELD(x, 46, 46) +#define MCA_REG__STATUS__UECC(x) MCA_REG_FIELD(x, 45, 45) +#define MCA_REG__STATUS__DEFERRED(x) MCA_REG_FIELD(x, 44, 44) +#define MCA_REG__STATUS__POISON(x) MCA_REG_FIELD(x, 43, 43) +#define MCA_REG__STATUS__SCRUB(x) MCA_REG_FIELD(x, 40, 40) +#define MCA_REG__STATUS__ERRCOREID(x) MCA_REG_FIELD(x, 37, 32) +#define MCA_REG__STATUS__ADDRLSB(x) MCA_REG_FIELD(x, 29, 24) +#define MCA_REG__STATUS__ERRORCODEEXT(x) MCA_REG_FIELD(x, 21, 16) +#define MCA_REG__STATUS__ERRORCODE(x) MCA_REG_FIELD(x, 15, 0) + +#define MCA_REG__SYND__ERRORINFORMATION(x) MCA_REG_FIELD(x, 17, 0) + +enum amdgpu_mca_ip { + AMDGPU_MCA_IP_UNKNOW 
= -1, + AMDGPU_MCA_IP_PSP = 0, + AMDGPU_MCA_IP_SDMA, + AMDGPU_MCA_IP_GC, + AMDGPU_MCA_IP_SMU, + AMDGPU_MCA_IP_MP5, + AMDGPU_MCA_IP_UMC, + AMDGPU_MCA_IP_PCS_XGMI, + AMDGPU_MCA_IP_COUNT, +}; + +enum amdgpu_mca_error_type { + AMDGPU_MCA_ERROR_TYPE_UE = 0, + AMDGPU_MCA_ERROR_TYPE_CE, +}; + struct amdgpu_mca_ras_block { struct amdgpu_ras_block_object ras_block; }; @@ -34,6 +78,55 @@ struct amdgpu_mca { struct amdgpu_mca_ras mp0; struct amdgpu_mca_ras mp1; struct amdgpu_mca_ras mpio; + const struct amdgpu_mca_smu_funcs *mca_funcs; +}; + +enum mca_reg_idx { + MCA_REG_IDX_STATUS = 1, + MCA_REG_IDX_ADDR = 2, + MCA_REG_IDX_MISC0 = 3, + MCA_REG_IDX_IPID = 5, + MCA_REG_IDX_SYND = 6, + MCA_REG_IDX_COUNT = 16, +}; + +struct mca_bank_info { + int socket_id; + int aid; + int hwid; + int mcatype; +}; + +struct mca_bank_entry { + int idx; + enum amdgpu_mca_error_type type; + enum amdgpu_mca_ip ip; + struct mca_bank_info info; + uint64_t regs[MCA_MAX_REGS_COUNT]; +}; + +struct mca_bank_node { + struct mca_bank_entry entry; + struct list_head node; +}; + +struct mca_bank_set { + int nr_entries; + struct list_head list; +}; + +struct amdgpu_mca_smu_funcs { + int max_ue_count; + int max_ce_count; + int (*mca_set_debug_mode)(struct amdgpu_device *adev, bool enable); + int (*mca_get_ras_mca_set)(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, + struct mca_bank_set *mca_set); + int (*mca_parse_mca_error_count)(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, + struct mca_bank_entry *entry, uint32_t *count); + int (*mca_get_valid_mca_count)(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, + uint32_t *count); + int (*mca_get_mca_entry)(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, + int idx, struct mca_bank_entry *entry); }; void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev, @@ -53,4 +146,26 @@ void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev, int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev); int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev); int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev); + +void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs); +int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable); +int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count); +int amdgpu_mca_smu_get_mca_set_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, + enum amdgpu_mca_error_type type, uint32_t *total); +int amdgpu_mca_smu_get_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, + enum amdgpu_mca_error_type type, uint32_t *count); +int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, + enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count); +int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, + enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set); +int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, + int idx, struct mca_bank_entry *entry); + +void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root); + +void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set); +int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry); +void amdgpu_mca_bank_set_release(struct mca_bank_set 
*mca_set); +int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 6aa750523..30c010836 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -132,7 +132,8 @@ int amdgpu_mes_init(struct amdgpu_device *adev) adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe; for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) { - if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0)) + if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < + IP_VERSION(6, 0, 0)) adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc; /* zero sdma_hqd_mask for non-existent engine */ else if (adev->sdma.num_instances == 1) @@ -885,6 +886,11 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER; op_input.set_shader_debugger.process_context_addr = process_context_addr; op_input.set_shader_debugger.flags.u32all = flags; + + /* use amdgpu mes_flush_shader_debugger instead */ + if (op_input.set_shader_debugger.flags.process_ctx_flush) + return -EINVAL; + op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl; memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl, sizeof(op_input.set_shader_debugger.tcp_watch_cntl)); @@ -904,6 +910,32 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, return r; } +int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev, + uint64_t process_context_addr) +{ + struct mes_misc_op_input op_input = {0}; + int r; + + if (!adev->mes.funcs->misc_op) { + DRM_ERROR("mes flush shader debugger is not supported!\n"); + return -EINVAL; + } + + op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER; + op_input.set_shader_debugger.process_context_addr = process_context_addr; + op_input.set_shader_debugger.flags.process_ctx_flush = true; + + amdgpu_mes_lock(&adev->mes); + + r = adev->mes.funcs->misc_op(&adev->mes, &op_input); + if (r) + DRM_ERROR("failed to set_shader_debugger\n"); + + amdgpu_mes_unlock(&adev->mes); + + return r; +} + static void amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev, struct amdgpu_ring *ring, @@ -1351,8 +1383,10 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev) for (i = 0; i < ARRAY_SIZE(queue_types); i++) { /* On GFX v10.3, fw hasn't supported to map sdma queue. */ - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) && - adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0) && + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= + IP_VERSION(10, 3, 0) && + amdgpu_ip_version(adev, GC_HWIP, 0) < + IP_VERSION(11, 0, 0) && queue_types[i][0] == AMDGPU_RING_TYPE_SDMA) continue; @@ -1413,7 +1447,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe) amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) { + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin", ucode_prefix, pipe == AMDGPU_MES_SCHED_PIPE ? 
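
The -EINVAL guard above exists because the ioctl-supplied flags word aliases the bitfield through u32all, so a caller could otherwise smuggle in the flush bit that is reserved for amdgpu_mes_flush_shader_debugger(). A standalone illustration of the aliasing, assuming the common little-endian bitfield layout (bitfield order is implementation-defined):

#include <stdio.h>
#include <stdint.h>

union debugger_flags { /* mirrors mes_misc_op_input.flags in this series */
	struct {
		uint32_t single_memop : 1;
		uint32_t single_alu_op : 1;
		uint32_t reserved : 29;
		uint32_t process_ctx_flush : 1;
	};
	uint32_t u32all;
};

int main(void)
{
	union debugger_flags f = { .u32all = 1u << 31 };

	if (f.process_ctx_flush)
		printf("flush bit set via raw u32all; reject with -EINVAL\n");
	return 0;
}
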
"_2" : "1"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index a27b424ff..c2c88b772 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -291,9 +291,10 @@ struct mes_misc_op_input { uint64_t process_context_addr; union { struct { - uint64_t single_memop : 1; - uint64_t single_alu_op : 1; - uint64_t reserved: 30; + uint32_t single_memop : 1; + uint32_t single_alu_op : 1; + uint32_t reserved: 29; + uint32_t process_ctx_flush: 1; }; uint32_t u32all; } flags; @@ -369,7 +370,8 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, const uint32_t *tcp_watch_cntl, uint32_t flags, bool trap_en); - +int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev, + uint64_t process_context_addr); int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id, int queue_type, int idx, struct amdgpu_mes_ctx_data *ctx_data, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index 6cf7a8829..65e35059d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -69,6 +69,8 @@ struct amdgpu_nbio_funcs { u32 (*get_memsize)(struct amdgpu_device *adev); void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance, bool use_doorbell, int doorbell_index, int doorbell_size); + void (*vpe_doorbell_range)(struct amdgpu_device *adev, int instance, + bool use_doorbell, int doorbell_index, int doorbell_size); void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell, int doorbell_index, int instance); void (*gc_doorbell_init)(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index ace837cfa..425cebcc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -459,7 +459,7 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, *cpu_addr = NULL; } -/* Validate bo size is bit bigger then the request domain */ +/* Validate bo size is bit bigger than the request domain */ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev, unsigned long size, u32 domain) { @@ -469,29 +469,24 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev, * If GTT is part of requested domains the check must succeed to * allow fall back to GTT. 
*/ - if (domain & AMDGPU_GEM_DOMAIN_GTT) { + if (domain & AMDGPU_GEM_DOMAIN_GTT) man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); - - if (man && size < man->size) - return true; - else if (!man) - WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized"); - goto fail; - } else if (domain & AMDGPU_GEM_DOMAIN_VRAM) { + else if (domain & AMDGPU_GEM_DOMAIN_VRAM) man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + else + return true; - if (man && size < man->size) - return true; - goto fail; + if (!man) { + if (domain & AMDGPU_GEM_DOMAIN_GTT) + WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized"); + return false; } /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU, _DOMAIN_DOORBELL */ - return true; + if (size < man->size) + return true; -fail: - if (man) - DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size, - man->size); + DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size, man->size); return false; } @@ -1250,19 +1245,15 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, * amdgpu_bo_move_notify - notification about a memory move * @bo: pointer to a buffer object * @evict: if this move is evicting the buffer from the graphics address space - * @new_mem: new information of the bufer object * * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs * bookkeeping. * TTM driver callback which is called when ttm moves a buffer. */ -void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, - bool evict, - struct ttm_resource *new_mem) +void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_bo *abo; - struct ttm_resource *old_mem = bo->resource; if (!amdgpu_bo_is_amdgpu_bo(bo)) return; @@ -1279,13 +1270,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, /* remember the eviction */ if (evict) atomic64_inc(&adev->num_evictions); - - /* update statistics */ - if (!new_mem) - return; - - /* move_notify is called before move happens */ - trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type); } void amdgpu_bo_get_memory(struct amdgpu_bo *bo, @@ -1348,6 +1332,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) abo = ttm_to_amdgpu_bo(bo); + WARN_ON(abo->vm_bo); + if (abo->kfd_bo) amdgpu_amdkfd_release_notify(abo); @@ -1532,10 +1518,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - uint64_t offset; + uint64_t offset = AMDGPU_BO_INVALID_OFFSET; + + if (bo->tbo.resource->mem_type == TTM_PL_TT) + offset = amdgpu_gmc_agp_addr(&bo->tbo); - offset = (bo->tbo.resource->start << PAGE_SHIFT) + - amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type); + if (offset == AMDGPU_BO_INVALID_OFFSET) + offset = (bo->tbo.resource->start << PAGE_SHIFT) + + amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type); return amdgpu_gmc_sign_extend(offset); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index d28e21bae..a3ea8a82d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -344,9 +344,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata, int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, size_t buffer_size, uint32_t *metadata_size, uint64_t *flags); -void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, - bool evict, - struct 
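
The validate-size rework above replaces two near-identical branches with one pick-the-manager step and a single size check at the end. A standalone restatement of the resulting control flow with stubbed types:

#include <stdio.h>
#include <stdbool.h>

struct ttm_resource_manager { unsigned long size; }; /* stub */

static bool validate_size(struct ttm_resource_manager *gtt_man,
			  struct ttm_resource_manager *vram_man,
			  unsigned long size, bool want_gtt, bool want_vram)
{
	struct ttm_resource_manager *man;

	if (want_gtt)
		man = gtt_man;   /* GTT first, so VRAM requests keep a fallback */
	else if (want_vram)
		man = vram_man;
	else
		return true;     /* other domains are not size-checked here */

	if (!man)
		return false;    /* manager not initialized */

	return size < man->size;
}

int main(void)
{
	struct ttm_resource_manager gtt = { .size = 1ul << 30 };

	printf("4 KiB in 1 GiB GTT: %d\n",
	       validate_size(&gtt, NULL, 4096, true, false));
	return 0;
}
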
ttm_resource *new_mem); +void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict); void amdgpu_bo_release_notify(struct ttm_buffer_object *bo); vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 429ef212c..a21045d01 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -100,7 +100,7 @@ static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp return; } - switch (adev->ip_versions[MP0_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 4): case IP_VERSION(11, 0, 5): @@ -128,7 +128,7 @@ static int psp_init_sriov_microcode(struct psp_context *psp) amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); - switch (adev->ip_versions[MP0_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { case IP_VERSION(9, 0, 0): case IP_VERSION(11, 0, 7): case IP_VERSION(11, 0, 9): @@ -162,7 +162,7 @@ static int psp_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct psp_context *psp = &adev->psp; - switch (adev->ip_versions[MP0_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { case IP_VERSION(9, 0, 0): psp_v3_1_set_psp_funcs(psp); psp->autoload_supported = false; @@ -334,7 +334,7 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev, bool ret = false; int i; - if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6)) + if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) return false; db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET; @@ -413,7 +413,7 @@ static int psp_sw_init(void *handle) adev->psp.xgmi_context.supports_extended_data = !adev->gmc.xgmi.connected_to_cpu && - adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2); + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2); memset(&scpm_entry, 0, sizeof(scpm_entry)); if ((psp_get_runtime_db_entry(adev, @@ -773,7 +773,7 @@ static int psp_load_toc(struct psp_context *psp, static bool psp_boottime_tmr(struct psp_context *psp) { - switch (psp->adev->ip_versions[MP0_HWIP][0]) { + switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) { case IP_VERSION(13, 0, 6): return true; default: @@ -828,7 +828,7 @@ static int psp_tmr_init(struct psp_context *psp) static bool psp_skip_tmr(struct psp_context *psp) { - switch (psp->adev->ip_versions[MP0_HWIP][0]) { + switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) { case IP_VERSION(11, 0, 9): case IP_VERSION(11, 0, 7): case IP_VERSION(13, 0, 2): @@ -1215,8 +1215,8 @@ int psp_xgmi_terminate(struct psp_context *psp) struct amdgpu_device *adev = psp->adev; /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */ - if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) || - (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) && + if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || + (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) && adev->gmc.xgmi.connected_to_cpu)) return 0; @@ -1267,6 +1267,8 @@ invoke: xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE; ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); + /* note down the capbility flag for XGMI TA */ + psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag; return ret; } @@ -1313,9 +1315,11 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id) 
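
The mechanical conversions in this file swap raw adev->ip_versions[][] reads for the amdgpu_ip_version() accessor. The stored word now carries variant and sub-revision bits in its low byte, and the accessor masks them off so IP_VERSION(maj, min, rev) comparisons keep matching; the exact bit layout below is an assumption based on this series:

#include <stdio.h>
#include <stdint.h>

#define IP_VERSION_FULL(mj, mn, rv, var, srev) \
	(((mj) << 24) | ((mn) << 16) | ((rv) << 8) | ((var) << 4) | (srev))
#define IP_VERSION(mj, mn, rv) IP_VERSION_FULL(mj, mn, rv, 0, 0)

static uint32_t ip_version(uint32_t stored) /* models amdgpu_ip_version() */
{
	return stored & ~0xffU; /* drop variant/sub-revision bits */
}

int main(void)
{
	uint32_t stored = IP_VERSION_FULL(13, 0, 6, 1, 2); /* 13.0.6, variant 1 */

	printf("matches 13.0.6: %d\n",
	       ip_version(stored) == IP_VERSION(13, 0, 6));
	return 0;
}
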
static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp) { - return (psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) && + return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == + IP_VERSION(13, 0, 2) && psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) || - psp->adev->ip_versions[MP0_HWIP][0] >= IP_VERSION(13, 0, 6); + amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= + IP_VERSION(13, 0, 6); } /* @@ -1386,7 +1390,7 @@ int psp_xgmi_get_topology_info(struct psp_context *psp, /* Fill in the shared memory with topology information as input */ topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; - xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO; + xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO; topology_info_input->num_nodes = number_devices; for (i = 0; i < topology_info_input->num_nodes; i++) { @@ -1397,7 +1401,7 @@ int psp_xgmi_get_topology_info(struct psp_context *psp, } /* Invoke xgmi ta to get the topology information */ - ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO); + ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO); if (ret) return ret; @@ -1422,26 +1426,58 @@ int psp_xgmi_get_topology_info(struct psp_context *psp, /* Invoke xgmi ta again to get the link information */ if (psp_xgmi_peer_link_info_supported(psp)) { - struct ta_xgmi_cmd_get_peer_link_info_output *link_info_output; + struct ta_xgmi_cmd_get_peer_link_info *link_info_output; + struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output; bool requires_reflection = - (psp->xgmi_context.supports_extended_data && get_extended_data) || - psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6); + (psp->xgmi_context.supports_extended_data && + get_extended_data) || + amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == + IP_VERSION(13, 0, 6); + bool ta_port_num_support = psp->xgmi_context.xgmi_ta_caps & + EXTEND_PEER_LINK_INFO_CMD_FLAG; + + /* popluate the shared output buffer rather than the cmd input buffer + * with node_ids as the input for GET_PEER_LINKS command execution. + * This is required for GET_PEER_LINKS per xgmi ta implementation. + * The same requirement for GET_EXTEND_PEER_LINKS command. + */ + if (ta_port_num_support) { + link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info; + + for (i = 0; i < topology->num_nodes; i++) + link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id; + + link_extend_info_output->num_nodes = topology->num_nodes; + xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS; + } else { + link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info; - xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS; + for (i = 0; i < topology->num_nodes; i++) + link_info_output->nodes[i].node_id = topology->nodes[i].node_id; - ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_PEER_LINKS); + link_info_output->num_nodes = topology->num_nodes; + xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS; + } + ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); if (ret) return ret; - link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info; for (i = 0; i < topology->num_nodes; i++) { + uint8_t node_num_links = ta_port_num_support ? + link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links; /* accumulate num_links on extended data */ - topology->nodes[i].num_links = get_extended_data ? - topology->nodes[i].num_links + - link_info_output->nodes[i].num_links : - ((requires_reflection && topology->nodes[i].num_links) ? 
topology->nodes[i].num_links : - link_info_output->nodes[i].num_links); + if (get_extended_data) { + topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links; + } else { + topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ? + topology->nodes[i].num_links : node_num_links; + } + /* popluate the connected port num info if supported and available */ + if (ta_port_num_support && topology->nodes[i].num_links) { + memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num, + sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM); + } /* reflect the topology information for bi-directionality */ if (requires_reflection && topology->nodes[i].num_hops) @@ -2089,6 +2125,21 @@ int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev) return ret; } +int amdgpu_psp_query_boot_status(struct amdgpu_device *adev) +{ + struct psp_context *psp = &adev->psp; + int ret = 0; + + if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) + return 0; + + if (psp->funcs && + psp->funcs->query_boot_status) + ret = psp->funcs->query_boot_status(psp); + + return ret; +} + static int psp_hw_start(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -2390,6 +2441,27 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK: *type = GFX_FW_TYPE_RS64_MEC_P3_STACK; break; + case AMDGPU_UCODE_ID_VPE_CTX: + *type = GFX_FW_TYPE_VPEC_FW1; + break; + case AMDGPU_UCODE_ID_VPE_CTL: + *type = GFX_FW_TYPE_VPEC_FW2; + break; + case AMDGPU_UCODE_ID_VPE: + *type = GFX_FW_TYPE_VPE; + break; + case AMDGPU_UCODE_ID_UMSCH_MM_UCODE: + *type = GFX_FW_TYPE_UMSCH_UCODE; + break; + case AMDGPU_UCODE_ID_UMSCH_MM_DATA: + *type = GFX_FW_TYPE_UMSCH_DATA; + break; + case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER: + *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER; + break; + case AMDGPU_UCODE_ID_P2S_TABLE: + *type = GFX_FW_TYPE_P2S_TABLE; + break; case AMDGPU_UCODE_ID_MAXIMUM: default: return -EINVAL; @@ -2481,6 +2553,31 @@ int psp_execute_ip_fw_load(struct psp_context *psp, return ret; } +static int psp_load_p2s_table(struct psp_context *psp) +{ + int ret; + struct amdgpu_device *adev = psp->adev; + struct amdgpu_firmware_info *ucode = + &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE]; + + if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)) + return 0; + + if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) { + uint32_t supp_vers = adev->flags & AMD_IS_APU ? 
0x0036013D : + 0x0036003C; + if (psp->sos.fw_version < supp_vers) + return 0; + } + + if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) + return 0; + + ret = psp_execute_ip_fw_load(psp, ucode); + + return ret; +} + static int psp_load_smu_fw(struct psp_context *psp) { int ret; @@ -2499,10 +2596,9 @@ static int psp_load_smu_fw(struct psp_context *psp) if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) return 0; - if ((amdgpu_in_reset(adev) && - ras && adev->ras_enabled && - (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) || - adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 2)))) { + if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled && + (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) { ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); if (ret) DRM_WARN("Failed to set MP1 state prepare for reload\n"); @@ -2522,6 +2618,9 @@ static bool fw_load_skip_check(struct psp_context *psp, if (!ucode->fw || !ucode->ucode_size) return true; + if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE) + return true; + if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && (psp_smu_reload_quirk(psp) || psp->autoload_supported || @@ -2570,6 +2669,9 @@ static int psp_load_non_psp_fw(struct psp_context *psp) return ret; } + /* Load P2S table first if it's available */ + psp_load_p2s_table(psp); + for (i = 0; i < adev->firmware.max_ucodes; i++) { ucode = &adev->firmware.ucode[i]; @@ -2585,9 +2687,12 @@ static int psp_load_non_psp_fw(struct psp_context *psp) continue; if (psp->autoload_supported && - (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7) || - adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 11) || - adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 12)) && + (amdgpu_ip_version(adev, MP0_HWIP, 0) == + IP_VERSION(11, 0, 7) || + amdgpu_ip_version(adev, MP0_HWIP, 0) == + IP_VERSION(11, 0, 11) || + amdgpu_ip_version(adev, MP0_HWIP, 0) == + IP_VERSION(11, 0, 12)) && (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3)) @@ -3128,7 +3233,7 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev) le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); if (adev->gmc.xgmi.connected_to_cpu || - (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2))) { + (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) { adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version); adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 3e67ed63e..c4d9cbde5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -134,6 +134,7 @@ struct psp_funcs { int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr); int (*vbflash_stat)(struct psp_context *psp); int (*fatal_error_recovery_quirk)(struct psp_context *psp); + int (*query_boot_status)(struct psp_context *psp); }; struct ta_funcs { @@ -149,6 +150,7 @@ struct psp_xgmi_node_info { uint8_t is_sharing_enabled; enum ta_xgmi_assigned_sdma_engine sdma_engine; uint8_t num_links; + struct xgmi_connected_port_num port_num[TA_XGMI__MAX_PORT_NUM]; }; struct psp_xgmi_topology_info { @@ -189,6 +191,7 @@ struct psp_xgmi_context { struct ta_context context; struct psp_xgmi_topology_info top_info; bool supports_extended_data; + uint8_t xgmi_ta_caps; }; struct psp_ras_context { @@ -536,4 +539,6 @@ int is_psp_fw_valid(struct 
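
psp_load_p2s_table() above gates the upload on the SOS firmware version, with different minimums for the APU and dGPU flavors of the same part. The check reduces to a small predicate; the version constants are copied from the hunk:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static bool p2s_load_allowed(bool is_apu, uint32_t sos_fw_version)
{
	uint32_t supp_vers = is_apu ? 0x0036013D : 0x0036003C;

	return sos_fw_version >= supp_vers; /* otherwise skip silently */
}

int main(void)
{
	printf("dGPU SOS 0x0036003B: %d\n", p2s_load_allowed(false, 0x0036003B));
	printf("APU  SOS 0x0036013D: %d\n", p2s_load_allowed(true,  0x0036013D));
	return 0;
}
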
psp_bin_desc bin); int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev); +int amdgpu_psp_query_boot_status(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c index 468a67b30..ca5c86e5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c @@ -362,7 +362,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size } } - if (copy_to_user((char *)buf, context->mem_context.shared_buf, shared_buf_len)) + if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len)) ret = -EFAULT; err_free_shared_buf: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 6f6341f70..4a3726bb6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "amdgpu.h" #include "amdgpu_ras.h" @@ -152,8 +153,9 @@ static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev) static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address) { - struct ras_err_data err_data = {0, 0, 0, NULL}; + struct ras_err_data err_data; struct eeprom_table_record err_rec; + int ret; if ((address >= adev->gmc.mc_vram_size) || (address >= RAS_UMC_INJECT_ADDR_LIMIT)) { @@ -170,6 +172,10 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre return 0; } + ret = amdgpu_ras_error_data_init(&err_data); + if (ret) + return ret; + memset(&err_rec, 0x0, sizeof(struct eeprom_table_record)); err_data.err_addr = &err_rec; amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0); @@ -180,6 +186,8 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre amdgpu_ras_save_bad_pages(adev, NULL); } + amdgpu_ras_error_data_fini(&err_data); + dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n"); dev_warn(adev->dev, "Clear EEPROM:\n"); dev_warn(adev->dev, " echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n"); @@ -201,8 +209,8 @@ static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, return -EINVAL; /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */ - if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && - obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { + if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) && + amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) { if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) dev_warn(obj->adev->dev, "Failed to reset error counter and error status"); } @@ -611,8 +619,8 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, if (amdgpu_ras_query_error_status(obj->adev, &info)) return -EINVAL; - if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && - obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { + if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) && + amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) { if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) dev_warn(obj->adev->dev, "Failed to reset error counter and error status"); } @@ -628,8 +636,11 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, static inline void put_obj(struct ras_manager *obj) { - if (obj && (--obj->use == 0)) + if (obj && (--obj->use == 0)) { 
list_del(&obj->node); + amdgpu_ras_error_data_fini(&obj->err_data); + } + if (obj && (obj->use < 0)) DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head)); } @@ -659,6 +670,9 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev, if (alive_obj(obj)) return NULL; + if (amdgpu_ras_error_data_init(&obj->err_data)) + return NULL; + obj->head = *head; obj->adev = adev; list_add(&obj->node, &con->head); @@ -769,9 +783,10 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev, if (!con) return -EINVAL; - /* Do not enable ras feature if it is not allowed */ - if (enable && - head->block != AMDGPU_RAS_BLOCK__GFX && + /* For non-gfx ip, do not enable ras feature if it is not allowed */ + /* For gfx ip, regardless of feature support status, */ + /* Force issue enable or disable ras feature commands */ + if (head->block != AMDGPU_RAS_BLOCK__GFX && !amdgpu_ras_is_feature_allowed(adev, head)) return 0; @@ -1014,105 +1029,269 @@ static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_d } } -/* query/inject/cure begin */ -int amdgpu_ras_query_error_status(struct amdgpu_device *adev, - struct ras_query_if *info) -{ - struct amdgpu_ras_block_object *block_obj = NULL; - struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); - struct ras_err_data err_data = {0, 0, 0, NULL}; +static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, + struct ras_manager *ras_mgr, + struct ras_err_data *err_data, + const char *blk_name, + bool is_ue) +{ + struct amdgpu_smuio_mcm_config_info *mcm_info; + struct ras_err_node *err_node; + struct ras_err_info *err_info; + + if (is_ue) { + for_each_ras_error(err_node, err_data) { + err_info = &err_node->err_info; + mcm_info = &err_info->mcm_info; + if (err_info->ue_count) { + dev_info(adev->dev, "socket: %d, die: %d, " + "%lld new uncorrectable hardware errors detected in %s block\n", + mcm_info->socket_id, + mcm_info->die_id, + err_info->ue_count, + blk_name); + } + } - if (!obj) - return -EINVAL; + for_each_ras_error(err_node, &ras_mgr->err_data) { + err_info = &err_node->err_info; + mcm_info = &err_info->mcm_info; + dev_info(adev->dev, "socket: %d, die: %d, " + "%lld uncorrectable hardware errors detected in total in %s block\n", + mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name); + } - if (info->head.block == AMDGPU_RAS_BLOCK__UMC) { - amdgpu_ras_get_ecc_info(adev, &err_data); } else { - block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0); - if (!block_obj || !block_obj->hw_ops) { - dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", - get_ras_block_str(&info->head)); - return -EINVAL; + for_each_ras_error(err_node, err_data) { + err_info = &err_node->err_info; + mcm_info = &err_info->mcm_info; + if (err_info->ce_count) { + dev_info(adev->dev, "socket: %d, die: %d, " + "%lld new correctable hardware errors detected in %s block, " + "no user action is needed\n", + mcm_info->socket_id, + mcm_info->die_id, + err_info->ce_count, + blk_name); + } } - if (block_obj->hw_ops->query_ras_error_count) - block_obj->hw_ops->query_ras_error_count(adev, &err_data); - - if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) || - (info->head.block == AMDGPU_RAS_BLOCK__GFX) || - (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) { - if (block_obj->hw_ops->query_ras_error_status) - block_obj->hw_ops->query_ras_error_status(adev); - } + for_each_ras_error(err_node, &ras_mgr->err_data) { + err_info = &err_node->err_info; + mcm_info = &err_info->mcm_info; + 
dev_info(adev->dev, "socket: %d, die: %d, " + "%lld correctable hardware errors detected in total in %s block, " + "no user action is needed\n", + mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, blk_name); + } } +} - obj->err_data.ue_count += err_data.ue_count; - obj->err_data.ce_count += err_data.ce_count; +static inline bool err_data_has_source_info(struct ras_err_data *data) +{ + return !list_empty(&data->err_node_list); +} - info->ue_count = obj->err_data.ue_count; - info->ce_count = obj->err_data.ce_count; +static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev, + struct ras_query_if *query_if, + struct ras_err_data *err_data) +{ + struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head); + const char *blk_name = get_ras_block_str(&query_if->head); - if (err_data.ce_count) { - if (!adev->aid_mask && - adev->smuio.funcs && - adev->smuio.funcs->get_socket_id && - adev->smuio.funcs->get_die_id) { + if (err_data->ce_count) { + if (err_data_has_source_info(err_data)) { + amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, false); + } else if (!adev->aid_mask && + adev->smuio.funcs && + adev->smuio.funcs->get_socket_id && + adev->smuio.funcs->get_die_id) { dev_info(adev->dev, "socket: %d, die: %d " - "%ld correctable hardware errors " - "detected in %s block, no user " - "action is needed.\n", - adev->smuio.funcs->get_socket_id(adev), - adev->smuio.funcs->get_die_id(adev), - obj->err_data.ce_count, - get_ras_block_str(&info->head)); + "%ld correctable hardware errors " + "detected in %s block, no user " + "action is needed.\n", + adev->smuio.funcs->get_socket_id(adev), + adev->smuio.funcs->get_die_id(adev), + ras_mgr->err_data.ce_count, + blk_name); } else { dev_info(adev->dev, "%ld correctable hardware errors " - "detected in %s block, no user " - "action is needed.\n", - obj->err_data.ce_count, - get_ras_block_str(&info->head)); + "detected in %s block, no user " + "action is needed.\n", + ras_mgr->err_data.ce_count, + blk_name); } } - if (err_data.ue_count) { - if (!adev->aid_mask && - adev->smuio.funcs && - adev->smuio.funcs->get_socket_id && - adev->smuio.funcs->get_die_id) { + + if (err_data->ue_count) { + if (err_data_has_source_info(err_data)) { + amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, true); + } else if (!adev->aid_mask && + adev->smuio.funcs && + adev->smuio.funcs->get_socket_id && + adev->smuio.funcs->get_die_id) { dev_info(adev->dev, "socket: %d, die: %d " - "%ld uncorrectable hardware errors " - "detected in %s block\n", - adev->smuio.funcs->get_socket_id(adev), - adev->smuio.funcs->get_die_id(adev), - obj->err_data.ue_count, - get_ras_block_str(&info->head)); + "%ld uncorrectable hardware errors " + "detected in %s block\n", + adev->smuio.funcs->get_socket_id(adev), + adev->smuio.funcs->get_die_id(adev), + ras_mgr->err_data.ue_count, + blk_name); } else { dev_info(adev->dev, "%ld uncorrectable hardware errors " - "detected in %s block\n", - obj->err_data.ue_count, - get_ras_block_str(&info->head)); + "detected in %s block\n", + ras_mgr->err_data.ue_count, + blk_name); + } + } + +} + +static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data) +{ + struct ras_err_node *err_node; + struct ras_err_info *err_info; + + if (err_data_has_source_info(err_data)) { + for_each_ras_error(err_node, err_data) { + err_info = &err_node->err_info; + + amdgpu_ras_error_statistic_ce_count(&obj->err_data, &err_info->mcm_info, err_info->ce_count); + 
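
The statistic helpers above fold each query's per-source counts into the ras_manager's lifetime totals, keyed by socket and die. A simplified standalone model of that accumulation; the kernel keeps these per-source records on a linked list rather than the fixed array used here:

#include <stdio.h>
#include <stdint.h>

struct mcm_key { int socket_id; int die_id; };
struct err_slot { struct mcm_key key; uint64_t ce; int used; };

#define MAX_SLOTS 8
static struct err_slot totals[MAX_SLOTS];

static struct err_slot *find_or_add(struct mcm_key k)
{
	for (int i = 0; i < MAX_SLOTS; i++) {
		if (totals[i].used && totals[i].key.socket_id == k.socket_id &&
		    totals[i].key.die_id == k.die_id)
			return &totals[i];
		if (!totals[i].used) {
			totals[i].used = 1;
			totals[i].key = k;
			return &totals[i];
		}
	}
	return NULL; /* table full; kernel list has no such limit */
}

static void statistic_ce_count(struct mcm_key k, uint64_t n)
{
	struct err_slot *s = find_or_add(k);

	if (s)
		s->ce += n;
}

int main(void)
{
	struct mcm_key k = { .socket_id = 0, .die_id = 1 };

	statistic_ce_count(k, 3); /* first query */
	statistic_ce_count(k, 2); /* later query accumulates */
	printf("socket 0 die 1: %llu correctable in total\n",
	       (unsigned long long)totals[0].ce);
	return 0;
}
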
amdgpu_ras_error_statistic_ue_count(&obj->err_data, &err_info->mcm_info, err_info->ue_count); + } + } else { + /* for legacy asic path which doesn't has error source info */ + obj->err_data.ue_count += err_data->ue_count; + obj->err_data.ce_count += err_data->ce_count; + } +} + +static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev, + struct ras_query_if *info, + struct ras_err_data *err_data, + unsigned int error_query_mode) +{ + enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT; + struct amdgpu_ras_block_object *block_obj = NULL; + + if (blk == AMDGPU_RAS_BLOCK_COUNT) + return -EINVAL; + + if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY) + return -EINVAL; + + if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) { + if (info->head.block == AMDGPU_RAS_BLOCK__UMC) { + amdgpu_ras_get_ecc_info(adev, err_data); + } else { + block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0); + if (!block_obj || !block_obj->hw_ops) { + dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", + get_ras_block_str(&info->head)); + return -EINVAL; + } + + if (block_obj->hw_ops->query_ras_error_count) + block_obj->hw_ops->query_ras_error_count(adev, err_data); + + if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) || + (info->head.block == AMDGPU_RAS_BLOCK__GFX) || + (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) { + if (block_obj->hw_ops->query_ras_error_status) + block_obj->hw_ops->query_ras_error_status(adev); + } } + } else { + /* FIXME: add code to check return value later */ + amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data); + amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data); } return 0; } -int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, - enum amdgpu_ras_block block) +/* query/inject/cure begin */ +int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info) { - struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0); + struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); + struct ras_err_data err_data; + unsigned int error_query_mode; + int ret; - if (!amdgpu_ras_is_supported(adev, block)) + if (!obj) return -EINVAL; - if (!block_obj || !block_obj->hw_ops) { - dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", - ras_block_str(block)); + ret = amdgpu_ras_error_data_init(&err_data); + if (ret) + return ret; + + if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) return -EINVAL; + + ret = amdgpu_ras_query_error_status_helper(adev, info, + &err_data, + error_query_mode); + if (ret) + goto out_fini_err_data; + + amdgpu_rasmgr_error_data_statistic_update(obj, &err_data); + + info->ue_count = obj->err_data.ue_count; + info->ce_count = obj->err_data.ce_count; + + amdgpu_ras_error_generate_report(adev, info, &err_data); + +out_fini_err_data: + amdgpu_ras_error_data_fini(&err_data); + + return ret; +} + +int amdgpu_ras_reset_error_count(struct amdgpu_device *adev, + enum amdgpu_ras_block block) +{ + struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0); + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; + struct amdgpu_hive_info *hive; + int hive_ras_recovery = 0; + + if (!block_obj || !block_obj->hw_ops) { + dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", + ras_block_str(block)); + return -EOPNOTSUPP; + } + + if (!amdgpu_ras_is_supported(adev, block) || + 
!amdgpu_ras_get_mca_debug_mode(adev)) + return -EOPNOTSUPP; + + hive = amdgpu_get_xgmi_hive(adev); + if (hive) { + hive_ras_recovery = atomic_read(&hive->ras_recovery); + amdgpu_put_xgmi_hive(hive); } + /* skip ras error reset in gpu reset */ + if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) || + hive_ras_recovery) && + mca_funcs && mca_funcs->mca_set_debug_mode) + return -EOPNOTSUPP; + if (block_obj->hw_ops->reset_ras_error_count) block_obj->hw_ops->reset_ras_error_count(adev); + return 0; +} + +int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, + enum amdgpu_ras_block block) +{ + struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0); + + if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP) + return 0; + if ((block == AMDGPU_RAS_BLOCK__GFX) || (block == AMDGPU_RAS_BLOCK__MMHUB)) { if (block_obj->hw_ops->reset_ras_error_status) @@ -1208,8 +1387,8 @@ static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev, /* some hardware/IP supports read to clear * no need to explictly reset the err status after the query call */ - if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && - adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { + if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) && + amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) { if (amdgpu_ras_reset_error_status(adev, query_info->head.block)) dev_warn(adev->dev, "Failed to reset error counter and error status\n"); @@ -1369,6 +1548,22 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev, return sysfs_emit(buf, "feature mask: 0x%x\n", con->features); } +static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct amdgpu_ras *con = + container_of(attr, struct amdgpu_ras, version_attr); + return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version); +} + +static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct amdgpu_ras *con = + container_of(attr, struct amdgpu_ras, schema_attr); + return sysfs_emit(buf, "schema: 0x%x\n", con->schema); +} + static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); @@ -1379,11 +1574,13 @@ static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev) RAS_FS_NAME); } -static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev) +static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct attribute *attrs[] = { &con->features_attr.attr, + &con->version_attr.attr, + &con->schema_attr.attr, NULL }; struct attribute_group group = { @@ -1461,7 +1658,7 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) if (amdgpu_bad_page_threshold != 0) amdgpu_ras_sysfs_remove_bad_page_node(adev); - amdgpu_ras_sysfs_remove_feature_node(adev); + amdgpu_ras_sysfs_remove_dev_attr_node(adev); return 0; } @@ -1573,6 +1770,8 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) amdgpu_ras_debugfs_create(adev, &fs_info, dir); } } + + amdgpu_mca_smu_debugfs_init(adev, dir); } /* debugfs end */ @@ -1582,6 +1781,10 @@ static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO, amdgpu_ras_sysfs_badpages_read, NULL, 0); static DEVICE_ATTR(features, S_IRUGO, amdgpu_ras_sysfs_features_read, NULL); +static DEVICE_ATTR(version, 0444, + amdgpu_ras_sysfs_version_show, 
NULL); +static DEVICE_ATTR(schema, 0444, + amdgpu_ras_sysfs_schema_show, NULL); static int amdgpu_ras_fs_init(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); @@ -1590,6 +1793,8 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev) }; struct attribute *attrs[] = { &con->features_attr.attr, + &con->version_attr.attr, + &con->schema_attr.attr, NULL }; struct bin_attribute *bin_attrs[] = { @@ -1598,11 +1803,20 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev) }; int r; + group.attrs = attrs; + /* add features entry */ con->features_attr = dev_attr_features; - group.attrs = attrs; sysfs_attr_init(attrs[0]); + /* add version entry */ + con->version_attr = dev_attr_version; + sysfs_attr_init(attrs[1]); + + /* add schema entry */ + con->schema_attr = dev_attr_schema; + sysfs_attr_init(attrs[2]); + if (amdgpu_bad_page_threshold != 0) { /* add bad_page_features entry */ bin_attr_gpu_vram_bad_pages.private = NULL; @@ -1711,12 +1925,16 @@ static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj, struct amdgpu_iv_entry *entry) { struct ras_ih_data *data = &obj->ih_data; - struct ras_err_data err_data = {0, 0, 0, NULL}; + struct ras_err_data err_data; int ret; if (!data->cb) return; + ret = amdgpu_ras_error_data_init(&err_data); + if (ret) + return; + /* Let IP handle its data, we may need to get the output * from the callback to update the error type/count, etc */ @@ -1733,6 +1951,8 @@ static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj, obj->err_data.ue_count += err_data.ue_count; obj->err_data.ce_count += err_data.ce_count; } + + amdgpu_ras_error_data_fini(&err_data); } static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) @@ -1908,14 +2128,18 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) * should be removed once smu fixes the ecc_info table handling.
*/ if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) && - (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2))) + (amdgpu_ip_version(adev, MP1_HWIP, 0) == + IP_VERSION(13, 0, 2))) continue; amdgpu_ras_query_error_status(adev, &info); - if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && - adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) && - adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) { + if (amdgpu_ip_version(adev, MP0_HWIP, 0) != + IP_VERSION(11, 0, 2) && + amdgpu_ip_version(adev, MP0_HWIP, 0) != + IP_VERSION(11, 0, 4) && + amdgpu_ip_version(adev, MP0_HWIP, 0) != + IP_VERSION(13, 0, 0)) { if (amdgpu_ras_reset_error_status(adev, info.head.block)) dev_warn(adev->dev, "Failed to reset error counter and error status"); } @@ -2024,9 +2248,11 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) struct amdgpu_device *remote_adev = NULL; struct amdgpu_device *adev = ras->adev; struct list_head device_list, *device_list_handle = NULL; + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); + if (hive) + atomic_set(&hive->ras_recovery, 1); if (!ras->disable_ras_err_cnt_harvest) { - struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); /* Build list of devices to query RAS related errors */ if (hive && adev->gmc.xgmi.num_physical_nodes > 1) { @@ -2043,7 +2269,6 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) amdgpu_ras_log_on_err_counter(remote_adev); } - amdgpu_put_xgmi_hive(hive); } if (amdgpu_device_should_recover_gpu(ras->adev)) { @@ -2078,6 +2303,10 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context); } atomic_set(&ras->in_recovery, 0); + if (hive) { + atomic_set(&hive->ras_recovery, 0); + amdgpu_put_xgmi_hive(hive); + } } /* alloc/realloc bps array */ @@ -2403,7 +2632,7 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) { if (amdgpu_sriov_vf(adev)) { - switch (adev->ip_versions[MP0_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { case IP_VERSION(13, 0, 2): case IP_VERSION(13, 0, 6): return true; @@ -2413,7 +2642,7 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) } if (adev->asic_type == CHIP_IP_DISCOVERY) { - switch (adev->ip_versions[MP0_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { case IP_VERSION(13, 0, 0): case IP_VERSION(13, 0, 6): case IP_VERSION(13, 0, 10): @@ -2487,8 +2716,12 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev) /* VCN/JPEG RAS can be supported on both bare metal and * SRIOV environment */ - if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) || - adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0)) + if (amdgpu_ip_version(adev, VCN_HWIP, 0) == + IP_VERSION(2, 6, 0) || + amdgpu_ip_version(adev, VCN_HWIP, 0) == + IP_VERSION(4, 0, 0) || + amdgpu_ip_version(adev, VCN_HWIP, 0) == + IP_VERSION(4, 0, 3)) adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN | 1 << AMDGPU_RAS_BLOCK__JPEG); else @@ -2517,18 +2750,8 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev) /* hw_supported needs to be aligned with RAS block mask. */ adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK; - - /* - * Disable ras feature for aqua vanjaram - * by default on apu platform. - */ - if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6) && - adev->gmc.is_app_apu) - adev->ras_enabled = amdgpu_ras_enable != 1 ? 
0 : - adev->ras_hw_enabled & amdgpu_ras_mask; - else - adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 : - adev->ras_hw_enabled & amdgpu_ras_mask; + adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 : + adev->ras_hw_enabled & amdgpu_ras_mask; } static void amdgpu_ras_counte_dw(struct work_struct *work) @@ -2566,7 +2789,8 @@ static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev) return; /* Init poison supported flag, the default value is false */ - if (adev->gmc.xgmi.connected_to_cpu) { + if (adev->gmc.xgmi.connected_to_cpu || + adev->gmc.is_app_apu) { /* enabled by default when GPU is connected to CPU */ con->poison_supported = true; } else if (adev->df.funcs && @@ -2588,6 +2812,14 @@ static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev) } } +static int amdgpu_get_ras_schema(struct amdgpu_device *adev) +{ + return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) | + AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE | + AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE | + AMDGPU_RAS_ERROR__PARITY; +} + int amdgpu_ras_init(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); @@ -2630,6 +2862,7 @@ int amdgpu_ras_init(struct amdgpu_device *adev) con->update_channel_flag = false; con->features = 0; + con->schema = 0; INIT_LIST_HEAD(&con->head); /* Might need to get this flag from vbios. */ con->flags = RAS_DEFAULT_FLAGS; @@ -2637,7 +2870,7 @@ int amdgpu_ras_init(struct amdgpu_device *adev) /* initialize nbio ras function ahead of any other * ras functions so hardware fatal error interrupt * can be enabled as early as possible */ - switch (adev->ip_versions[NBIO_HWIP][0]) { + switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { case IP_VERSION(7, 4, 0): case IP_VERSION(7, 4, 1): case IP_VERSION(7, 4, 4): @@ -2685,6 +2918,9 @@ int amdgpu_ras_init(struct amdgpu_device *adev) amdgpu_ras_query_poison_mode(adev); + /* Get RAS schema for the particular SOC */ + con->schema = amdgpu_get_ras_schema(adev); + if (amdgpu_ras_fs_init(adev)) { r = -EINVAL; goto release_con; @@ -3173,6 +3409,47 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) return 0; } +void amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + if (con) + con->is_mca_debug_mode = enable; +} + +bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; + + if (!con) + return false; + + if (mca_funcs && mca_funcs->mca_set_debug_mode) + return con->is_mca_debug_mode; + else + return true; +} + +bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev, + unsigned int *error_query_mode) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; + + if (!con) { + *error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY; + return false; + } + + if (mca_funcs && mca_funcs->mca_set_debug_mode) + *error_query_mode = + (con->is_mca_debug_mode) ?
AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY; + else + *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY; + + return true; +} /* Register each ip ras block into amdgpu ras */ int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, @@ -3332,3 +3609,141 @@ void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev, WREG32(err_status_hi_offset, 0); } } + +int amdgpu_ras_error_data_init(struct ras_err_data *err_data) +{ + memset(err_data, 0, sizeof(*err_data)); + + INIT_LIST_HEAD(&err_data->err_node_list); + + return 0; +} + +static void amdgpu_ras_error_node_release(struct ras_err_node *err_node) +{ + if (!err_node) + return; + + list_del(&err_node->node); + kvfree(err_node); +} + +void amdgpu_ras_error_data_fini(struct ras_err_data *err_data) +{ + struct ras_err_node *err_node, *tmp; + + list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node) + amdgpu_ras_error_node_release(err_node); +} + +static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data, + struct amdgpu_smuio_mcm_config_info *mcm_info) +{ + struct ras_err_node *err_node; + struct amdgpu_smuio_mcm_config_info *ref_id; + + if (!err_data || !mcm_info) + return NULL; + + for_each_ras_error(err_node, err_data) { + ref_id = &err_node->err_info.mcm_info; + + if (mcm_info->socket_id == ref_id->socket_id && + mcm_info->die_id == ref_id->die_id) + return err_node; + } + + return NULL; +} + +static struct ras_err_node *amdgpu_ras_error_node_new(void) +{ + struct ras_err_node *err_node; + + err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL); + if (!err_node) + return NULL; + + INIT_LIST_HEAD(&err_node->node); + + return err_node; +} + +static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b) +{ + struct ras_err_node *nodea = container_of(a, struct ras_err_node, node); + struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node); + struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info; + struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info; + + if (unlikely(infoa->socket_id != infob->socket_id)) + return infoa->socket_id - infob->socket_id; + else + return infoa->die_id - infob->die_id; + + return 0; +} + +static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data, + struct amdgpu_smuio_mcm_config_info *mcm_info) +{ + struct ras_err_node *err_node; + + err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info); + if (err_node) + return &err_node->err_info; + + err_node = amdgpu_ras_error_node_new(); + if (!err_node) + return NULL; + + memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info)); + + err_data->err_list_count++; + list_add_tail(&err_node->node, &err_data->err_node_list); + list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp); + + return &err_node->err_info; +} + +int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, + struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count) +{ + struct ras_err_info *err_info; + + if (!err_data || !mcm_info) + return -EINVAL; + + if (!count) + return 0; + + err_info = amdgpu_ras_error_get_info(err_data, mcm_info); + if (!err_info) + return -EINVAL; + + err_info->ue_count += count; + err_data->ue_count += count; + + return 0; +} + +int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data, + struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count) +{ + struct ras_err_info *err_info; + + if (!err_data || !mcm_info) + return -EINVAL; + + if (!count) + return 0; 
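+ /* Look up (or create) the node for this {socket_id, die_id} pair, then bump both the per-instance and the aggregate CE counters. A typical caller pairs these helpers with the err_data lifecycle, e.g.: amdgpu_ras_error_data_init(&err_data); amdgpu_ras_error_statistic_ce_count(&err_data, &mcm_info, count); amdgpu_ras_error_data_fini(&err_data); */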
+ + err_info = amdgpu_ras_error_get_info(err_data, mcm_info); + if (!err_info) + return -EINVAL; + + err_info->ce_count += count; + err_data->ce_count += count; + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index ffb49b2d5..19161916a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -28,6 +28,7 @@ #include #include "ta_ras_if.h" #include "amdgpu_ras_eeprom.h" +#include "amdgpu_smuio.h" struct amdgpu_iv_entry; @@ -319,6 +320,12 @@ enum amdgpu_ras_ret { AMDGPU_RAS_PT, }; +enum amdgpu_ras_error_query_mode { + AMDGPU_RAS_INVALID_ERROR_QUERY = 0, + AMDGPU_RAS_DIRECT_ERROR_QUERY = 1, + AMDGPU_RAS_FIRMWARE_ERROR_QUERY = 2, +}; + /* ras error status register fields */ #define ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 #define ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L @@ -389,9 +396,12 @@ struct amdgpu_ras { /* ras infrastructure */ /* for ras itself. */ uint32_t features; + uint32_t schema; struct list_head head; /* sysfs */ struct device_attribute features_attr; + struct device_attribute version_attr; + struct device_attribute schema_attr; struct bin_attribute badpages_attr; struct dentry *de_ras_eeprom_table; /* block array */ @@ -430,23 +440,41 @@ struct amdgpu_ras { /* Indicates whether smu needs to update bad channel info */ bool update_channel_flag; + /* Record status of smu mca debug mode */ + bool is_mca_debug_mode; /* Record special requirements of gpu reset caller */ uint32_t gpu_reset_flags; }; struct ras_fs_data { - char sysfs_name[32]; + char sysfs_name[48]; char debugfs_name[32]; }; +struct ras_err_info { + struct amdgpu_smuio_mcm_config_info mcm_info; + u64 ce_count; + u64 ue_count; +}; + +struct ras_err_node { + struct list_head node; + struct ras_err_info err_info; +}; + struct ras_err_data { unsigned long ue_count; unsigned long ce_count; unsigned long err_addr_cnt; struct eeprom_table_record *err_addr; + u32 err_list_count; + struct list_head err_node_list; }; +#define for_each_ras_error(err_node, err_data) \ + list_for_each_entry(err_node, &(err_data)->err_node_list, node) + struct ras_err_handler_data { /* point to bad page records array */ struct eeprom_table_record *bps; @@ -691,6 +719,8 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev); int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info); +int amdgpu_ras_reset_error_count(struct amdgpu_device *adev, + enum amdgpu_ras_block block); int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, enum amdgpu_ras_block block); @@ -743,6 +773,11 @@ struct amdgpu_ras* amdgpu_ras_get_context(struct amdgpu_device *adev); int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con); +void amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable); +bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev); +bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev, + unsigned int *mode); + int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, struct amdgpu_ras_block_object *ras_block_obj); void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev); @@ -767,4 +802,12 @@ void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev, const struct amdgpu_ras_err_status_reg_entry *reg_list, uint32_t reg_list_size, uint32_t instance); + +int amdgpu_ras_error_data_init(struct ras_err_data *err_data); +void amdgpu_ras_error_data_fini(struct ras_err_data *err_data); +int
amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data, + struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count); +int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, + struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 9d82701d3..2fde93b00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -149,11 +149,11 @@ RAS_TABLE_HEADER_SIZE - \ RAS_TABLE_V2_1_INFO_SIZE) / RAS_TABLE_RECORD_SIZE) -#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev +#define to_amdgpu_device(x) ((container_of(x, struct amdgpu_ras, eeprom_control))->adev) static bool __is_ras_eeprom_supported(struct amdgpu_device *adev) { - switch (adev->ip_versions[MP1_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(11, 0, 2): /* VEGA20 and ARCTURUS */ case IP_VERSION(11, 0, 7): /* Sienna cichlid */ case IP_VERSION(13, 0, 0): @@ -191,7 +191,7 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev, return true; } - switch (adev->ip_versions[MP1_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(11, 0, 2): /* VEGA20 and ARCTURUS */ if (adev->asic_type == CHIP_VEGA20) @@ -622,7 +622,8 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control, __encode_table_record_to_buf(control, &record[i], pp); /* update bad channel bitmap */ - if (!(control->bad_channel_bitmap & (1 << record[i].mem_channel))) { + if ((record[i].mem_channel < BITS_PER_TYPE(control->bad_channel_bitmap)) && + !(control->bad_channel_bitmap & (1 << record[i].mem_channel))) { control->bad_channel_bitmap |= 1 << record[i].mem_channel; con->update_channel_flag = true; } @@ -975,7 +976,8 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, __decode_table_record_from_buf(control, &record[i], pp); /* update bad channel bitmap */ - if (!(control->bad_channel_bitmap & (1 << record[i].mem_channel))) { + if ((record[i].mem_channel < BITS_PER_TYPE(control->bad_channel_bitmap)) && + !(control->bad_channel_bitmap & (1 << record[i].mem_channel))) { control->bad_channel_bitmap |= 1 << record[i].mem_channel; con->update_channel_flag = true; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h index 3c988cc40..381101d2b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h @@ -112,7 +112,6 @@ fallback: cur->remaining = size; cur->node = NULL; WARN_ON(res && start + size > res->size); - return; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index 5fed06ffc..4baa30012 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -21,24 +21,19 @@ * */ +#include +#include + #include "amdgpu_reset.h" #include "aldebaran.h" #include "sienna_cichlid.h" #include "smu_v13_0_10.h" -int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl, - struct amdgpu_reset_handler *handler) -{ - /* TODO: Check if handler exists? 
*/ - list_add_tail(&handler->handler_list, &reset_ctl->reset_handlers); - return 0; -} - int amdgpu_reset_init(struct amdgpu_device *adev) { int ret = 0; - switch (adev->ip_versions[MP1_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(13, 0, 2): case IP_VERSION(13, 0, 6): ret = aldebaran_reset_init(adev); @@ -60,7 +55,7 @@ int amdgpu_reset_fini(struct amdgpu_device *adev) { int ret = 0; - switch (adev->ip_versions[MP1_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(13, 0, 2): case IP_VERSION(13, 0, 6): ret = aldebaran_reset_fini(adev); @@ -167,5 +162,82 @@ void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain) up_write(&reset_domain->sem); } +#ifndef CONFIG_DEV_COREDUMP +void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, + struct amdgpu_reset_context *reset_context) +{ +} +#else +static ssize_t +amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, + void *data, size_t datalen) +{ + struct drm_printer p; + struct amdgpu_coredump_info *coredump = data; + struct drm_print_iterator iter; + int i; + + iter.data = buffer; + iter.offset = 0; + iter.start = offset; + iter.remain = count; + + p = drm_coredump_printer(&iter); + + drm_printf(&p, "**** AMDGPU Device Coredump ****\n"); + drm_printf(&p, "version: " AMDGPU_COREDUMP_VERSION "\n"); + drm_printf(&p, "kernel: " UTS_RELEASE "\n"); + drm_printf(&p, "module: " KBUILD_MODNAME "\n"); + drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec, + coredump->reset_time.tv_nsec); + + if (coredump->reset_task_info.pid) + drm_printf(&p, "process_name: %s PID: %d\n", + coredump->reset_task_info.process_name, + coredump->reset_task_info.pid); + + if (coredump->reset_vram_lost) + drm_printf(&p, "VRAM is lost due to GPU reset!\n"); + if (coredump->adev->reset_info.num_regs) { + drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n"); + + for (i = 0; i < coredump->adev->reset_info.num_regs; i++) + drm_printf(&p, "0x%08x: 0x%08x\n", + coredump->adev->reset_info.reset_dump_reg_list[i], + coredump->adev->reset_info.reset_dump_reg_value[i]); + } + + return count - iter.remain; +} + +static void amdgpu_devcoredump_free(void *data) +{ + kfree(data); +} + +void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, + struct amdgpu_reset_context *reset_context) +{ + struct amdgpu_coredump_info *coredump; + struct drm_device *dev = adev_to_drm(adev); + + coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT); + if (!coredump) { + DRM_ERROR("%s: failed to allocate memory for coredump\n", __func__); + return; + } + coredump->reset_vram_lost = vram_lost; + + if (reset_context->job && reset_context->job->vm) + coredump->reset_task_info = reset_context->job->vm->task_info; + + coredump->adev = adev; + + ktime_get_ts64(&coredump->reset_time); + + dev_coredumpm(dev->dev, THIS_MODULE, coredump, 0, GFP_NOWAIT, + amdgpu_devcoredump_read, amdgpu_devcoredump_free); +} +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h index f4a501ff8..19899f6b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h @@ -26,11 +26,12 @@ #include "amdgpu.h" +#define AMDGPU_RESET_MAX_HANDLERS 5 + enum AMDGPU_RESET_FLAGS { AMDGPU_NEED_FULL_RESET = 0, AMDGPU_SKIP_HW_RESET = 1, - AMDGPU_RESET_FOR_DEVICE_REMOVE = 2, }; struct amdgpu_reset_context { @@ -44,7 +45,6 @@ struct amdgpu_reset_context { struct amdgpu_reset_handler { enum amd_reset_method reset_method; - struct list_head 
handler_list; int (*prepare_env)(struct amdgpu_reset_control *reset_ctl, struct amdgpu_reset_context *context); int (*prepare_hwcontext)(struct amdgpu_reset_control *reset_ctl, @@ -63,7 +63,8 @@ struct amdgpu_reset_control { void *handle; struct work_struct reset_work; struct mutex reset_lock; - struct list_head reset_handlers; + struct amdgpu_reset_handler *( + *reset_handlers)[AMDGPU_RESET_MAX_HANDLERS]; atomic_t in_reset; enum amd_reset_method active_reset; struct amdgpu_reset_handler *(*get_reset_handler)( @@ -87,6 +88,17 @@ struct amdgpu_reset_domain { atomic_t reset_res; }; +#ifdef CONFIG_DEV_COREDUMP + +#define AMDGPU_COREDUMP_VERSION "1" + +struct amdgpu_coredump_info { + struct amdgpu_device *adev; + struct amdgpu_task_info reset_task_info; + struct timespec64 reset_time; + bool reset_vram_lost; +}; +#endif int amdgpu_reset_init(struct amdgpu_device *adev); int amdgpu_reset_fini(struct amdgpu_device *adev); @@ -97,8 +109,10 @@ int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev, int amdgpu_reset_perform_reset(struct amdgpu_device *adev, struct amdgpu_reset_context *reset_context); -int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl, - struct amdgpu_reset_handler *handler); +int amdgpu_reset_prepare_env(struct amdgpu_device *adev, + struct amdgpu_reset_context *reset_context); +int amdgpu_reset_restore_env(struct amdgpu_device *adev, + struct amdgpu_reset_context *reset_context); struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_domain_type type, char *wq_name); @@ -126,4 +140,11 @@ void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain); void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain); +void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, + struct amdgpu_reset_context *reset_context); + +#define for_each_handler(i, handler, reset_ctl) \ + for (i = 0; (i < AMDGPU_RESET_MAX_HANDLERS) && \ + (handler = (*reset_ctl->reset_handlers)[i]); \ + ++i) #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 80d6e132e..45424ebf9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -434,8 +434,12 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, struct dma_fence *fence) { unsigned long flags; + ktime_t deadline; - ktime_t deadline = ktime_add_us(ktime_get(), 10000); + if (unlikely(ring->adev->debug_disable_soft_recovery)) + return false; + + deadline = ktime_add_us(ktime_get(), 10000); if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence) return false; @@ -638,6 +642,10 @@ static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring, struct amdgpu_mqd_prop *prop) { struct amdgpu_device *adev = ring->adev; + bool is_high_prio_compute = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE && + amdgpu_gfx_is_high_priority_compute_queue(adev, ring); + bool is_high_prio_gfx = ring->funcs->type == AMDGPU_RING_TYPE_GFX && + amdgpu_gfx_is_high_priority_graphics_queue(adev, ring); memset(prop, 0, sizeof(*prop)); @@ -655,10 +663,8 @@ static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring, */ prop->hqd_active = ring->funcs->type == AMDGPU_RING_TYPE_KIQ; - if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE && - amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) || - (ring->funcs->type == AMDGPU_RING_TYPE_GFX && - amdgpu_gfx_is_high_priority_graphics_queue(adev, ring))) { + prop->allow_tunneling = is_high_prio_compute; + if 
(is_high_prio_compute || is_high_prio_gfx) { prop->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; prop->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index e2ab303ad..bbb53720a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -44,6 +44,7 @@ struct amdgpu_vm; #define AMDGPU_MAX_COMPUTE_RINGS 8 #define AMDGPU_MAX_VCE_RINGS 3 #define AMDGPU_MAX_UVD_ENC_RINGS 2 +#define AMDGPU_MAX_VPE_RINGS 2 enum amdgpu_ring_priority_level { AMDGPU_RING_PRIO_0, @@ -77,8 +78,10 @@ enum amdgpu_ring_type { AMDGPU_RING_TYPE_VCN_DEC = AMDGPU_HW_IP_VCN_DEC, AMDGPU_RING_TYPE_VCN_ENC = AMDGPU_HW_IP_VCN_ENC, AMDGPU_RING_TYPE_VCN_JPEG = AMDGPU_HW_IP_VCN_JPEG, + AMDGPU_RING_TYPE_VPE = AMDGPU_HW_IP_VPE, AMDGPU_RING_TYPE_KIQ, - AMDGPU_RING_TYPE_MES + AMDGPU_RING_TYPE_MES, + AMDGPU_RING_TYPE_UMSCH_MM, }; enum amdgpu_ib_pool_type { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index e2b9392d7..1d9d187de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -208,7 +208,7 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, const struct sdma_firmware_header_v2_0 *sdma_hdr; uint16_t version_major; char ucode_prefix[30]; - char fw_name[40]; + char fw_name[52]; amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix)); if (instance == 0) @@ -251,8 +251,11 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, else { /* Use a single copy per SDMA firmware type. PSP uses the same instance for all * groups of SDMAs */ - if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2) && - adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && + if (amdgpu_ip_version(adev, SDMA0_HWIP, + 0) == + IP_VERSION(4, 4, 2) && + adev->firmware.load_type == + AMDGPU_FW_LOAD_PSP && adev->sdma.num_inst_per_aid == i) { break; } @@ -289,27 +292,6 @@ out: return err; } -void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev) -{ - struct amdgpu_ring *sdma; - int i; - - for (i = 0; i < adev->sdma.num_instances; i++) { - if (adev->sdma.has_page_queue) { - sdma = &adev->sdma.instance[i].page; - if (adev->mman.buffer_funcs_ring == sdma) { - amdgpu_ttm_set_buffer_funcs_status(adev, false); - break; - } - } - sdma = &adev->sdma.instance[i].ring; - if (adev->mman.buffer_funcs_ring == sdma) { - amdgpu_ttm_set_buffer_funcs_status(adev, false); - break; - } - } -} - int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev) { int err = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 513ac2212..173a2a308 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -169,7 +169,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, u32 instance, bool duplicate); void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev, bool duplicate); -void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev); int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h index 89c38d864..ff4435181 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h @@ -23,6 +23,18 @@ #ifndef __AMDGPU_SMUIO_H__ #define __AMDGPU_SMUIO_H__ +enum amdgpu_pkg_type { + AMDGPU_PKG_TYPE_APU = 2, + AMDGPU_PKG_TYPE_CEM = 3, + 
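+ /* OAM: OCP Accelerator Module */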
AMDGPU_PKG_TYPE_OAM = 4, + AMDGPU_PKG_TYPE_UNKNOWN, +}; + +struct amdgpu_smuio_mcm_config_info { + int socket_id; + int die_id; +}; + struct amdgpu_smuio_funcs { u32 (*get_rom_index_offset)(struct amdgpu_device *adev); u32 (*get_rom_data_offset)(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index dcd8c066b..1b013a44c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -191,7 +191,8 @@ static bool amdgpu_sync_test_fence(struct amdgpu_device *adev, /* Never sync to VM updates either. */ if (fence_owner == AMDGPU_FENCE_OWNER_VM && - owner != AMDGPU_FENCE_OWNER_UNDEFINED) + owner != AMDGPU_FENCE_OWNER_UNDEFINED && + owner != AMDGPU_FENCE_OWNER_KFD) return false; /* Ignore fences depending on the sync mode */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 4e51dce3a..75c9fd2c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -545,10 +545,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, return r; } + trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type); out: /* update statistics */ atomic64_add(bo->base.size, &adev->num_bytes_moved); - amdgpu_bo_move_notify(bo, evict, new_mem); + amdgpu_bo_move_notify(bo, evict); return 0; } @@ -959,10 +960,8 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) return 0; addr = amdgpu_gmc_agp_addr(bo); - if (addr != AMDGPU_BO_INVALID_OFFSET) { - bo->resource->start = addr >> PAGE_SHIFT; + if (addr != AMDGPU_BO_INVALID_OFFSET) return 0; - } /* allocate GART space */ placement.num_placement = 1; @@ -1555,7 +1554,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, static void amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo) { - amdgpu_bo_move_notify(bo, false, NULL); + amdgpu_bo_move_notify(bo, false); } static struct ttm_device_funcs amdgpu_bo_driver = { @@ -1727,7 +1726,8 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) reserve_size = amdgpu_atomfirmware_get_fw_reserved_fb_size(adev); - if (!adev->bios && adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) + if (!adev->bios && + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) reserve_size = max(reserve_size, (uint32_t)280 << 20); else if (!reserve_size) reserve_size = DISCOVERY_TMR_OFFSET; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 8beefc045..0efb2568c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -642,6 +642,8 @@ const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id) return "SMC"; case AMDGPU_UCODE_ID_PPTABLE: return "PPTABLE"; + case AMDGPU_UCODE_ID_P2S_TABLE: + return "P2STABLE"; case AMDGPU_UCODE_ID_UVD: return "UVD"; case AMDGPU_UCODE_ID_UVD1: @@ -664,20 +666,42 @@ const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id) return "DMCUB"; case AMDGPU_UCODE_ID_CAP: return "CAP"; + case AMDGPU_UCODE_ID_VPE_CTX: + return "VPE_CTX"; + case AMDGPU_UCODE_ID_VPE_CTL: + return "VPE_CTL"; + case AMDGPU_UCODE_ID_VPE: + return "VPE"; + case AMDGPU_UCODE_ID_UMSCH_MM_UCODE: + return "UMSCH_MM_UCODE"; + case AMDGPU_UCODE_ID_UMSCH_MM_DATA: + return "UMSCH_MM_DATA"; + case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER: + return "UMSCH_MM_CMD_BUFFER"; default: return "UNKNOWN UCODE"; } } +static inline int amdgpu_ucode_is_valid(uint32_t fw_version) +{ + if (!fw_version) + 
return -EINVAL; + + return 0; +} + #define FW_VERSION_ATTR(name, mode, field) \ static ssize_t show_##name(struct device *dev, \ - struct device_attribute *attr, \ - char *buf) \ + struct device_attribute *attr, char *buf) \ { \ struct drm_device *ddev = dev_get_drvdata(dev); \ struct amdgpu_device *adev = drm_to_adev(ddev); \ \ - return sysfs_emit(buf, "0x%08x\n", adev->field); \ + if (!buf) \ + return amdgpu_ucode_is_valid(adev->field); \ + \ + return sysfs_emit(buf, "0x%08x\n", adev->field); \ } \ static DEVICE_ATTR(name, mode, show_##name, NULL) @@ -722,9 +746,24 @@ static struct attribute *fw_attrs[] = { NULL }; +#define to_dev_attr(x) container_of(x, struct device_attribute, attr) + +static umode_t amdgpu_ucode_sys_visible(struct kobject *kobj, + struct attribute *attr, int idx) +{ + struct device_attribute *dev_attr = to_dev_attr(attr); + struct device *dev = kobj_to_dev(kobj); + + if (dev_attr->show(dev, dev_attr, NULL) == -EINVAL) + return 0; + + return attr->mode; +} + static const struct attribute_group fw_attr_group = { .name = "fw_version", - .attrs = fw_attrs + .attrs = fw_attrs, + .is_visible = amdgpu_ucode_sys_visible }; int amdgpu_ucode_sysfs_init(struct amdgpu_device *adev) @@ -749,6 +788,8 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, const struct mes_firmware_header_v1_0 *mes_hdr = NULL; const struct sdma_firmware_header_v2_0 *sdma_hdr = NULL; const struct imu_firmware_header_v1_0 *imu_hdr = NULL; + const struct vpe_firmware_header_v1_0 *vpe_hdr = NULL; + const struct umsch_mm_firmware_header_v1_0 *umsch_mm_hdr = NULL; u8 *ucode_addr; if (!ucode->fw) @@ -768,6 +809,8 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, mes_hdr = (const struct mes_firmware_header_v1_0 *)ucode->fw->data; sdma_hdr = (const struct sdma_firmware_header_v2_0 *)ucode->fw->data; imu_hdr = (const struct imu_firmware_header_v1_0 *)ucode->fw->data; + vpe_hdr = (const struct vpe_firmware_header_v1_0 *)ucode->fw->data; + umsch_mm_hdr = (const struct umsch_mm_firmware_header_v1_0 *)ucode->fw->data; if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { switch (ucode->ucode_id) { @@ -884,6 +927,10 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, ucode->ucode_size = ucode->fw->size; ucode_addr = (u8 *)ucode->fw->data; break; + case AMDGPU_UCODE_ID_P2S_TABLE: + ucode->ucode_size = ucode->fw->size; + ucode_addr = (u8 *)ucode->fw->data; + break; case AMDGPU_UCODE_ID_IMU_I: ucode->ucode_size = le32_to_cpu(imu_hdr->imu_iram_ucode_size_bytes); ucode_addr = (u8 *)ucode->fw->data + @@ -950,6 +997,26 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, ucode_addr = (u8 *)ucode->fw->data + le32_to_cpu(cpv2_hdr->data_offset_bytes); break; + case AMDGPU_UCODE_ID_VPE_CTX: + ucode->ucode_size = le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(vpe_hdr->header.ucode_array_offset_bytes); + break; + case AMDGPU_UCODE_ID_VPE_CTL: + ucode->ucode_size = le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(vpe_hdr->ctl_ucode_offset); + break; + case AMDGPU_UCODE_ID_UMSCH_MM_UCODE: + ucode->ucode_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(umsch_mm_hdr->header.ucode_array_offset_bytes); + break; + case AMDGPU_UCODE_ID_UMSCH_MM_DATA: + ucode->ucode_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + 
le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_offset_bytes); + break; default: ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes); ucode_addr = (u8 *)ucode->fw->data + @@ -1061,7 +1128,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int block_type) { if (block_type == MP0_HWIP) { - switch (adev->ip_versions[MP0_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { case IP_VERSION(9, 0, 0): switch (adev->asic_type) { case CHIP_VEGA10: @@ -1112,7 +1179,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl return "yellow_carp"; } } else if (block_type == MP1_HWIP) { - switch (adev->ip_versions[MP1_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(9, 0, 0): case IP_VERSION(10, 0, 0): case IP_VERSION(10, 0, 1): @@ -1138,7 +1205,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl return "aldebaran_smc"; } } else if (block_type == SDMA0_HWIP) { - switch (adev->ip_versions[SDMA0_HWIP][0]) { + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { case IP_VERSION(4, 0, 0): return "vega10_sdma"; case IP_VERSION(4, 0, 1): @@ -1182,7 +1249,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl return "vangogh_sdma"; } } else if (block_type == UVD_HWIP) { - switch (adev->ip_versions[UVD_HWIP][0]) { + switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): if (adev->apu_flags & AMD_APU_IS_RAVEN2) @@ -1207,7 +1274,8 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl case IP_VERSION(3, 0, 0): case IP_VERSION(3, 0, 64): case IP_VERSION(3, 0, 192): - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(10, 3, 0)) return "sienna_cichlid_vcn"; return "navy_flounder_vcn"; case IP_VERSION(3, 0, 2): @@ -1220,7 +1288,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl return "yellow_carp_vcn"; } } else if (block_type == GC_HWIP) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 0, 1): return "vega10"; case IP_VERSION(9, 2, 1): @@ -1273,7 +1341,7 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, int maj, min, rev; char *ip_name; const char *legacy; - uint32_t version = adev->ip_versions[block_type][0]; + uint32_t version = amdgpu_ip_version(adev, block_type, 0); legacy = amdgpu_ucode_legacy_naming(adev, block_type); if (legacy) { @@ -1297,6 +1365,9 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, case UVD_HWIP: ip_name = "vcn"; break; + case VPE_HWIP: + ip_name = "vpe"; + break; default: BUG(); } @@ -1326,9 +1397,13 @@ int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw, if (err) return -ENODEV; + err = amdgpu_ucode_validate(*fw); - if (err) + if (err) { dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name); + release_firmware(*fw); + *fw = NULL; + } return err; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index b03321e7d..4244a13f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -315,6 +315,36 @@ struct sdma_firmware_header_v2_0 { uint32_t ctl_jt_size; /* control thread size of jt */ }; +/* version_major=1, version_minor=0 */ +struct 
vpe_firmware_header_v1_0 { + struct common_firmware_header header; + uint32_t ucode_feature_version; + uint32_t ctx_ucode_size_bytes; /* context thread ucode size */ + uint32_t ctx_jt_offset; /* context thread jt location */ + uint32_t ctx_jt_size; /* context thread size of jt */ + uint32_t ctl_ucode_offset; + uint32_t ctl_ucode_size_bytes; /* control thread ucode size */ + uint32_t ctl_jt_offset; /* control thread jt location */ + uint32_t ctl_jt_size; /* control thread size of jt */ +}; + +/* version_major=1, version_minor=0 */ +struct umsch_mm_firmware_header_v1_0 { + struct common_firmware_header header; + uint32_t umsch_mm_ucode_version; + uint32_t umsch_mm_ucode_size_bytes; + uint32_t umsch_mm_ucode_offset_bytes; + uint32_t umsch_mm_ucode_data_version; + uint32_t umsch_mm_ucode_data_size_bytes; + uint32_t umsch_mm_ucode_data_offset_bytes; + uint32_t umsch_mm_irq_start_addr_lo; + uint32_t umsch_mm_irq_start_addr_hi; + uint32_t umsch_mm_uc_start_addr_lo; + uint32_t umsch_mm_uc_start_addr_hi; + uint32_t umsch_mm_data_start_addr_lo; + uint32_t umsch_mm_data_start_addr_hi; +}; + /* gpu info payload */ struct gpu_info_firmware_v1_0 { uint32_t gc_num_se; @@ -474,6 +504,13 @@ enum AMDGPU_UCODE_ID { AMDGPU_UCODE_ID_VCN0_RAM, AMDGPU_UCODE_ID_VCN1_RAM, AMDGPU_UCODE_ID_DMCUB, + AMDGPU_UCODE_ID_VPE_CTX, + AMDGPU_UCODE_ID_VPE_CTL, + AMDGPU_UCODE_ID_VPE, + AMDGPU_UCODE_ID_UMSCH_MM_UCODE, + AMDGPU_UCODE_ID_UMSCH_MM_DATA, + AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER, + AMDGPU_UCODE_ID_P2S_TABLE, AMDGPU_UCODE_ID_MAXIMUM, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index db0d94ca4..d65e21914 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -28,7 +28,7 @@ static int amdgpu_umc_convert_error_address(struct amdgpu_device *adev, struct ras_err_data *err_data, uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst) { - switch (adev->ip_versions[UMC_HWIP][0]) { + switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) { case IP_VERSION(6, 7, 0): umc_v6_7_convert_error_address(adev, err_data, err_addr, ch_inst, umc_inst); @@ -45,8 +45,12 @@ static int amdgpu_umc_convert_error_address(struct amdgpu_device *adev, int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev, uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst) { - struct ras_err_data err_data = {0, 0, 0, NULL}; - int ret = AMDGPU_RAS_FAIL; + struct ras_err_data err_data; + int ret; + + ret = amdgpu_ras_error_data_init(&err_data); + if (ret) + return ret; err_data.err_addr = kcalloc(adev->umc.max_ras_err_cnt_per_query, @@ -54,7 +58,8 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev, if (!err_data.err_addr) { dev_warn(adev->dev, "Failed to alloc memory for umc error record in MCA notifier!\n"); - return AMDGPU_RAS_FAIL; + ret = AMDGPU_RAS_FAIL; + goto out_fini_err_data; } /* @@ -63,7 +68,7 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev, ret = amdgpu_umc_convert_error_address(adev, &err_data, err_addr, ch_inst, umc_inst); if (ret) - goto out; + goto out_free_err_addr; if (amdgpu_bad_page_threshold != 0) { amdgpu_ras_add_bad_pages(adev, err_data.err_addr, @@ -71,8 +76,12 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev, amdgpu_ras_save_bad_pages(adev, NULL); } -out: +out_free_err_addr: kfree(err_data.err_addr); + +out_fini_err_data: + amdgpu_ras_error_data_fini(&err_data); + return ret; } @@ -157,8 +166,12 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, } } - if (reset) + if (reset) { 
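+ /* a NULL IV entry means we came from the poison consumption path rather than a UMC ECC interrupt */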
+ /* use mode-2 reset for poison consumption */ + if (!entry) + con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET; amdgpu_ras_reset_gpu(adev); + } } kfree(err_data->err_addr); @@ -182,18 +195,24 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset) } if (!amdgpu_sriov_vf(adev)) { - struct ras_err_data err_data = {0, 0, 0, NULL}; + struct ras_err_data err_data; struct ras_common_if head = { .block = AMDGPU_RAS_BLOCK__UMC, }; struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head); + ret = amdgpu_ras_error_data_init(&err_data); + if (ret) + return ret; + ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset); if (ret == AMDGPU_RAS_SUCCESS && obj) { obj->err_data.ue_count += err_data.ue_count; obj->err_data.ce_count += err_data.ce_count; } + + amdgpu_ras_error_data_fini(&err_data); } else { if (adev->virt.ops && adev->virt.ops->ras_poison_handler) adev->virt.ops->ras_poison_handler(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 43321f57f..417a6726c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -32,6 +32,11 @@ * is the index of 8KB block */ #define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5) +/* + * (addr / 256) * 32768, the higher 26 bits in ErrorAddr + * is the index of 32KB block + */ +#define ADDR_OF_32KB_BLOCK(addr) (((addr) & ~0xffULL) << 7) /* channel index is the index of 256B block */ #define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8) /* offset in 256B block */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c new file mode 100644 index 000000000..ca45ba8ac --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c @@ -0,0 +1,878 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE.
+ * + */ + +#include +#include + +#include "amdgpu.h" +#include "amdgpu_umsch_mm.h" +#include "umsch_mm_v4_0.h" + +struct umsch_mm_test_ctx_data { + uint8_t process_csa[PAGE_SIZE]; + uint8_t vpe_ctx_csa[PAGE_SIZE]; + uint8_t vcn_ctx_csa[PAGE_SIZE]; +}; + +struct umsch_mm_test_mqd_data { + uint8_t vpe_mqd[PAGE_SIZE]; + uint8_t vcn_mqd[PAGE_SIZE]; +}; + +struct umsch_mm_test_ring_data { + uint8_t vpe_ring[PAGE_SIZE]; + uint8_t vpe_ib[PAGE_SIZE]; + uint8_t vcn_ring[PAGE_SIZE]; + uint8_t vcn_ib[PAGE_SIZE]; +}; + +struct umsch_mm_test_queue_info { + uint64_t mqd_addr; + uint64_t csa_addr; + uint32_t doorbell_offset_0; + uint32_t doorbell_offset_1; + enum UMSCH_SWIP_ENGINE_TYPE engine; +}; + +struct umsch_mm_test { + struct amdgpu_bo *ctx_data_obj; + uint64_t ctx_data_gpu_addr; + uint32_t *ctx_data_cpu_addr; + + struct amdgpu_bo *mqd_data_obj; + uint64_t mqd_data_gpu_addr; + uint32_t *mqd_data_cpu_addr; + + struct amdgpu_bo *ring_data_obj; + uint64_t ring_data_gpu_addr; + uint32_t *ring_data_cpu_addr; + + + struct amdgpu_vm *vm; + struct amdgpu_bo_va *bo_va; + uint32_t pasid; + uint32_t vm_cntx_cntl; + uint32_t num_queues; +}; + +static int map_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va, + uint64_t addr, uint32_t size) +{ + struct amdgpu_sync sync; + struct drm_exec exec; + int r; + + amdgpu_sync_create(&sync); + + drm_exec_init(&exec, 0); + drm_exec_until_all_locked(&exec) { + r = drm_exec_lock_obj(&exec, &bo->tbo.base); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto error_fini_exec; + + r = amdgpu_vm_lock_pd(vm, &exec, 0); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto error_fini_exec; + } + + *bo_va = amdgpu_vm_bo_add(adev, vm, bo); + if (!*bo_va) { + r = -ENOMEM; + goto error_fini_exec; + } + + r = amdgpu_vm_bo_map(adev, *bo_va, addr, 0, size, + AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | + AMDGPU_PTE_EXECUTABLE); + + if (r) + goto error_del_bo_va; + + + r = amdgpu_vm_bo_update(adev, *bo_va, false); + if (r) + goto error_del_bo_va; + + amdgpu_sync_fence(&sync, (*bo_va)->last_pt_update); + + r = amdgpu_vm_update_pdes(adev, vm, false); + if (r) + goto error_del_bo_va; + + amdgpu_sync_fence(&sync, vm->last_update); + + amdgpu_sync_wait(&sync, false); + drm_exec_fini(&exec); + + amdgpu_sync_free(&sync); + + return 0; + +error_del_bo_va: + amdgpu_vm_bo_del(adev, *bo_va); + amdgpu_sync_free(&sync); + +error_fini_exec: + drm_exec_fini(&exec); + amdgpu_sync_free(&sync); + return r; +} + +static int unmap_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va, + uint64_t addr) +{ + struct drm_exec exec; + long r; + + drm_exec_init(&exec, 0); + drm_exec_until_all_locked(&exec) { + r = drm_exec_lock_obj(&exec, &bo->tbo.base); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto out_unlock; + + r = amdgpu_vm_lock_pd(vm, &exec, 0); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto out_unlock; + } + + + r = amdgpu_vm_bo_unmap(adev, bo_va, addr); + if (r) + goto out_unlock; + + amdgpu_vm_bo_del(adev, bo_va); + +out_unlock: + drm_exec_fini(&exec); + + return r; +} + +static void setup_vpe_queue(struct amdgpu_device *adev, + struct umsch_mm_test *test, + struct umsch_mm_test_queue_info *qinfo) +{ + struct MQD_INFO *mqd = (struct MQD_INFO *)test->mqd_data_cpu_addr; + uint64_t ring_gpu_addr = test->ring_data_gpu_addr; + + mqd->rb_base_lo = (ring_gpu_addr >> 8); + mqd->rb_base_hi = (ring_gpu_addr >> 40); + mqd->rb_size = 
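+ /* ring size: one page of 32-bit dwords */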
PAGE_SIZE / 4; + mqd->wptr_val = 0; + mqd->rptr_val = 0; + mqd->unmapped = 1; + + qinfo->mqd_addr = test->mqd_data_gpu_addr; + qinfo->csa_addr = test->ctx_data_gpu_addr + + offsetof(struct umsch_mm_test_ctx_data, vpe_ctx_csa); + qinfo->doorbell_offset_0 = (adev->doorbell_index.vpe_ring + 1) << 1; + qinfo->doorbell_offset_1 = 0; +} + +static void setup_vcn_queue(struct amdgpu_device *adev, + struct umsch_mm_test *test, + struct umsch_mm_test_queue_info *qinfo) +{ +} + +static int add_test_queue(struct amdgpu_device *adev, + struct umsch_mm_test *test, + struct umsch_mm_test_queue_info *qinfo) +{ + struct umsch_mm_add_queue_input queue_input = {}; + int r; + + queue_input.process_id = test->pasid; + queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(test->vm->root.bo); + + queue_input.process_va_start = 0; + queue_input.process_va_end = (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT; + + queue_input.process_quantum = 100000; /* 10ms */ + queue_input.process_csa_addr = test->ctx_data_gpu_addr + + offsetof(struct umsch_mm_test_ctx_data, process_csa); + + queue_input.context_quantum = 10000; /* 1ms */ + queue_input.context_csa_addr = qinfo->csa_addr; + + queue_input.inprocess_context_priority = CONTEXT_PRIORITY_LEVEL_NORMAL; + queue_input.context_global_priority_level = CONTEXT_PRIORITY_LEVEL_NORMAL; + queue_input.doorbell_offset_0 = qinfo->doorbell_offset_0; + queue_input.doorbell_offset_1 = qinfo->doorbell_offset_1; + + queue_input.engine_type = qinfo->engine; + queue_input.mqd_addr = qinfo->mqd_addr; + queue_input.vm_context_cntl = test->vm_cntx_cntl; + + amdgpu_umsch_mm_lock(&adev->umsch_mm); + r = adev->umsch_mm.funcs->add_queue(&adev->umsch_mm, &queue_input); + amdgpu_umsch_mm_unlock(&adev->umsch_mm); + if (r) + return r; + + return 0; +} + +static int remove_test_queue(struct amdgpu_device *adev, + struct umsch_mm_test *test, + struct umsch_mm_test_queue_info *qinfo) +{ + struct umsch_mm_remove_queue_input queue_input = {}; + int r; + + queue_input.doorbell_offset_0 = qinfo->doorbell_offset_0; + queue_input.doorbell_offset_1 = qinfo->doorbell_offset_1; + queue_input.context_csa_addr = qinfo->csa_addr; + + amdgpu_umsch_mm_lock(&adev->umsch_mm); + r = adev->umsch_mm.funcs->remove_queue(&adev->umsch_mm, &queue_input); + amdgpu_umsch_mm_unlock(&adev->umsch_mm); + if (r) + return r; + + return 0; +} + +static int submit_vpe_queue(struct amdgpu_device *adev, struct umsch_mm_test *test) +{ + struct MQD_INFO *mqd = (struct MQD_INFO *)test->mqd_data_cpu_addr; + uint32_t *ring = test->ring_data_cpu_addr + + offsetof(struct umsch_mm_test_ring_data, vpe_ring) / 4; + uint32_t *ib = test->ring_data_cpu_addr + + offsetof(struct umsch_mm_test_ring_data, vpe_ib) / 4; + uint64_t ib_gpu_addr = test->ring_data_gpu_addr + + offsetof(struct umsch_mm_test_ring_data, vpe_ib); + uint32_t *fence = ib + 2048 / 4; + uint64_t fence_gpu_addr = ib_gpu_addr + 2048; + const uint32_t test_pattern = 0xdeadbeef; + int i; + + ib[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0); + ib[1] = lower_32_bits(fence_gpu_addr); + ib[2] = upper_32_bits(fence_gpu_addr); + ib[3] = test_pattern; + + ring[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_INDIRECT, 0); + ring[1] = (ib_gpu_addr & 0xffffffe0); + ring[2] = upper_32_bits(ib_gpu_addr); + ring[3] = 4; + ring[4] = 0; + ring[5] = 0; + + mqd->wptr_val = (6 << 2); + // WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val); + + for (i = 0; i < adev->usec_timeout; i++) { + if (*fence == test_pattern) + return 0; + udelay(1); + } + + dev_err(adev->dev, "vpe 
queue submission timeout\n"); + + return -ETIMEDOUT; +} + +static int submit_vcn_queue(struct amdgpu_device *adev, struct umsch_mm_test *test) +{ + return 0; +} + +static int setup_umsch_mm_test(struct amdgpu_device *adev, + struct umsch_mm_test *test) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; + int r; + + test->vm_cntx_cntl = hub->vm_cntx_cntl; + + test->vm = kzalloc(sizeof(*test->vm), GFP_KERNEL); + if (!test->vm) { + r = -ENOMEM; + return r; + } + + r = amdgpu_vm_init(adev, test->vm, -1); + if (r) + goto error_free_vm; + + r = amdgpu_pasid_alloc(16); + if (r < 0) + goto error_fini_vm; + test->pasid = r; + + r = amdgpu_bo_create_kernel(adev, sizeof(struct umsch_mm_test_ctx_data), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &test->ctx_data_obj, + &test->ctx_data_gpu_addr, + (void **)&test->ctx_data_cpu_addr); + if (r) + goto error_free_pasid; + + memset(test->ctx_data_cpu_addr, 0, sizeof(struct umsch_mm_test_ctx_data)); + + r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &test->mqd_data_obj, + &test->mqd_data_gpu_addr, + (void **)&test->mqd_data_cpu_addr); + if (r) + goto error_free_ctx_data_obj; + + memset(test->mqd_data_cpu_addr, 0, PAGE_SIZE); + + r = amdgpu_bo_create_kernel(adev, sizeof(struct umsch_mm_test_ring_data), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &test->ring_data_obj, + NULL, + (void **)&test->ring_data_cpu_addr); + if (r) + goto error_free_mqd_data_obj; + + memset(test->ring_data_cpu_addr, 0, sizeof(struct umsch_mm_test_ring_data)); + + test->ring_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE; + r = map_ring_data(adev, test->vm, test->ring_data_obj, &test->bo_va, + test->ring_data_gpu_addr, sizeof(struct umsch_mm_test_ring_data)); + if (r) + goto error_free_ring_data_obj; + + return 0; + +error_free_ring_data_obj: + amdgpu_bo_free_kernel(&test->ring_data_obj, NULL, + (void **)&test->ring_data_cpu_addr); +error_free_mqd_data_obj: + amdgpu_bo_free_kernel(&test->mqd_data_obj, &test->mqd_data_gpu_addr, + (void **)&test->mqd_data_cpu_addr); +error_free_ctx_data_obj: + amdgpu_bo_free_kernel(&test->ctx_data_obj, &test->ctx_data_gpu_addr, + (void **)&test->ctx_data_cpu_addr); +error_free_pasid: + amdgpu_pasid_free(test->pasid); +error_fini_vm: + amdgpu_vm_fini(adev, test->vm); +error_free_vm: + kfree(test->vm); + + return r; +} + +static void cleanup_umsch_mm_test(struct amdgpu_device *adev, + struct umsch_mm_test *test) +{ + unmap_ring_data(adev, test->vm, test->ring_data_obj, + test->bo_va, test->ring_data_gpu_addr); + amdgpu_bo_free_kernel(&test->mqd_data_obj, &test->mqd_data_gpu_addr, + (void **)&test->mqd_data_cpu_addr); + amdgpu_bo_free_kernel(&test->ring_data_obj, NULL, + (void **)&test->ring_data_cpu_addr); + amdgpu_bo_free_kernel(&test->ctx_data_obj, &test->ctx_data_gpu_addr, + (void **)&test->ctx_data_cpu_addr); + amdgpu_pasid_free(test->pasid); + amdgpu_vm_fini(adev, test->vm); + kfree(test->vm); +} + +static int setup_test_queues(struct amdgpu_device *adev, + struct umsch_mm_test *test, + struct umsch_mm_test_queue_info *qinfo) +{ + int i, r; + + for (i = 0; i < test->num_queues; i++) { + if (qinfo[i].engine == UMSCH_SWIP_ENGINE_TYPE_VPE) + setup_vpe_queue(adev, test, &qinfo[i]); + else + setup_vcn_queue(adev, test, &qinfo[i]); + + r = add_test_queue(adev, test, &qinfo[i]); + if (r) + return r; + } + + return 0; +} + +static int submit_test_queues(struct amdgpu_device *adev, + struct umsch_mm_test *test, + struct umsch_mm_test_queue_info *qinfo) +{ + int i, r; + + for (i = 0; i < test->num_queues; i++) { + if (qinfo[i].engine 
== UMSCH_SWIP_ENGINE_TYPE_VPE) + r = submit_vpe_queue(adev, test); + else + r = submit_vcn_queue(adev, test); + if (r) + return r; + } + + return 0; +} + +static void cleanup_test_queues(struct amdgpu_device *adev, + struct umsch_mm_test *test, + struct umsch_mm_test_queue_info *qinfo) +{ + int i; + + for (i = 0; i < test->num_queues; i++) + remove_test_queue(adev, test, &qinfo[i]); +} + +static int umsch_mm_test(struct amdgpu_device *adev) +{ + struct umsch_mm_test_queue_info qinfo[] = { + { .engine = UMSCH_SWIP_ENGINE_TYPE_VPE }, + }; + struct umsch_mm_test test = { .num_queues = ARRAY_SIZE(qinfo) }; + int r; + + r = setup_umsch_mm_test(adev, &test); + if (r) + return r; + + r = setup_test_queues(adev, &test, qinfo); + if (r) + goto cleanup; + + r = submit_test_queues(adev, &test, qinfo); + if (r) + goto cleanup; + + cleanup_test_queues(adev, &test, qinfo); + cleanup_umsch_mm_test(adev, &test); + + return 0; + +cleanup: + cleanup_test_queues(adev, &test, qinfo); + cleanup_umsch_mm_test(adev, &test); + return r; +} + +int amdgpu_umsch_mm_submit_pkt(struct amdgpu_umsch_mm *umsch, void *pkt, int ndws) +{ + struct amdgpu_ring *ring = &umsch->ring; + + if (amdgpu_ring_alloc(ring, ndws)) + return -ENOMEM; + + amdgpu_ring_write_multiple(ring, pkt, ndws); + amdgpu_ring_commit(ring); + + return 0; +} + +int amdgpu_umsch_mm_query_fence(struct amdgpu_umsch_mm *umsch) +{ + struct amdgpu_ring *ring = &umsch->ring; + struct amdgpu_device *adev = ring->adev; + int r; + + r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, adev->usec_timeout); + if (r < 1) { + dev_err(adev->dev, "ring umsch timeout, emitted fence %u\n", + ring->fence_drv.sync_seq); + return -ETIMEDOUT; + } + + return 0; +} + +static void umsch_mm_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_umsch_mm *umsch = (struct amdgpu_umsch_mm *)ring; + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) + WDOORBELL32(ring->doorbell_index, ring->wptr << 2); + else + WREG32(umsch->rb_wptr, ring->wptr << 2); +} + +static u64 umsch_mm_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_umsch_mm *umsch = (struct amdgpu_umsch_mm *)ring; + struct amdgpu_device *adev = ring->adev; + + return RREG32(umsch->rb_rptr); +} + +static u64 umsch_mm_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_umsch_mm *umsch = (struct amdgpu_umsch_mm *)ring; + struct amdgpu_device *adev = ring->adev; + + return RREG32(umsch->rb_wptr); +} + +static const struct amdgpu_ring_funcs umsch_v4_0_ring_funcs = { + .type = AMDGPU_RING_TYPE_UMSCH_MM, + .align_mask = 0, + .nop = 0, + .support_64bit_ptrs = false, + .get_rptr = umsch_mm_ring_get_rptr, + .get_wptr = umsch_mm_ring_get_wptr, + .set_wptr = umsch_mm_ring_set_wptr, + .insert_nop = amdgpu_ring_insert_nop, +}; + +int amdgpu_umsch_mm_ring_init(struct amdgpu_umsch_mm *umsch) +{ + struct amdgpu_device *adev = container_of(umsch, struct amdgpu_device, umsch_mm); + struct amdgpu_ring *ring = &umsch->ring; + + ring->vm_hub = AMDGPU_MMHUB0(0); + ring->use_doorbell = true; + ring->no_scheduler = true; + ring->doorbell_index = (AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1) + 6; + + snprintf(ring->name, sizeof(ring->name), "umsch"); + + return amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); +} + +int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch) +{ + const struct umsch_mm_firmware_header_v1_0 *umsch_mm_hdr; + struct amdgpu_device *adev = umsch->ring.adev; + const char *fw_name = NULL; + int r; + + switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) { + 
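/* Pick the UMSCH firmware image that matches this VCN IP version. */ +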
case IP_VERSION(4, 0, 5): + fw_name = "amdgpu/umsch_mm_4_0_0.bin"; + break; + default: + break; + } + + r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, fw_name); + if (r) { + release_firmware(adev->umsch_mm.fw); + adev->umsch_mm.fw = NULL; + return r; + } + + umsch_mm_hdr = (const struct umsch_mm_firmware_header_v1_0 *)adev->umsch_mm.fw->data; + + adev->umsch_mm.ucode_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_size_bytes); + adev->umsch_mm.data_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_size_bytes); + + adev->umsch_mm.irq_start_addr = + le32_to_cpu(umsch_mm_hdr->umsch_mm_irq_start_addr_lo) | + ((uint64_t)(le32_to_cpu(umsch_mm_hdr->umsch_mm_irq_start_addr_hi)) << 32); + adev->umsch_mm.uc_start_addr = + le32_to_cpu(umsch_mm_hdr->umsch_mm_uc_start_addr_lo) | + ((uint64_t)(le32_to_cpu(umsch_mm_hdr->umsch_mm_uc_start_addr_hi)) << 32); + adev->umsch_mm.data_start_addr = + le32_to_cpu(umsch_mm_hdr->umsch_mm_data_start_addr_lo) | + ((uint64_t)(le32_to_cpu(umsch_mm_hdr->umsch_mm_data_start_addr_hi)) << 32); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + struct amdgpu_firmware_info *info; + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_UMSCH_MM_UCODE]; + info->ucode_id = AMDGPU_UCODE_ID_UMSCH_MM_UCODE; + info->fw = adev->umsch_mm.fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_UMSCH_MM_DATA]; + info->ucode_id = AMDGPU_UCODE_ID_UMSCH_MM_DATA; + info->fw = adev->umsch_mm.fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_size_bytes), PAGE_SIZE); + } + + return 0; +} + +int amdgpu_umsch_mm_allocate_ucode_buffer(struct amdgpu_umsch_mm *umsch) +{ + const struct umsch_mm_firmware_header_v1_0 *umsch_mm_hdr; + struct amdgpu_device *adev = umsch->ring.adev; + const __le32 *fw_data; + uint32_t fw_size; + int r; + + umsch_mm_hdr = (const struct umsch_mm_firmware_header_v1_0 *) + adev->umsch_mm.fw->data; + + fw_data = (const __le32 *)(adev->umsch_mm.fw->data + + le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_offset_bytes)); + fw_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_size_bytes); + + r = amdgpu_bo_create_reserved(adev, fw_size, + 4 * 1024, AMDGPU_GEM_DOMAIN_VRAM, + &adev->umsch_mm.ucode_fw_obj, + &adev->umsch_mm.ucode_fw_gpu_addr, + (void **)&adev->umsch_mm.ucode_fw_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to create umsch_mm fw ucode bo\n", r); + return r; + } + + memcpy(adev->umsch_mm.ucode_fw_ptr, fw_data, fw_size); + + amdgpu_bo_kunmap(adev->umsch_mm.ucode_fw_obj); + amdgpu_bo_unreserve(adev->umsch_mm.ucode_fw_obj); + return 0; +} + +int amdgpu_umsch_mm_allocate_ucode_data_buffer(struct amdgpu_umsch_mm *umsch) +{ + const struct umsch_mm_firmware_header_v1_0 *umsch_mm_hdr; + struct amdgpu_device *adev = umsch->ring.adev; + const __le32 *fw_data; + uint32_t fw_size; + int r; + + umsch_mm_hdr = (const struct umsch_mm_firmware_header_v1_0 *) + adev->umsch_mm.fw->data; + + fw_data = (const __le32 *)(adev->umsch_mm.fw->data + + le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_offset_bytes)); + fw_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_size_bytes); + + r = amdgpu_bo_create_reserved(adev, fw_size, + 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, + &adev->umsch_mm.data_fw_obj, + &adev->umsch_mm.data_fw_gpu_addr, + (void **)&adev->umsch_mm.data_fw_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to create umsch_mm fw data bo\n", r); + return r; + } + + memcpy(adev->umsch_mm.data_fw_ptr, fw_data, fw_size); + + 
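/* Drop the CPU mapping and the reservation now that the data image has been copied in. */ +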
amdgpu_bo_kunmap(adev->umsch_mm.data_fw_obj); + amdgpu_bo_unreserve(adev->umsch_mm.data_fw_obj); + return 0; +} + +int amdgpu_umsch_mm_psp_execute_cmd_buf(struct amdgpu_umsch_mm *umsch) +{ + struct amdgpu_device *adev = umsch->ring.adev; + struct amdgpu_firmware_info ucode = { + .ucode_id = AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER, + .mc_addr = adev->umsch_mm.cmd_buf_gpu_addr, + .ucode_size = ((uintptr_t)adev->umsch_mm.cmd_buf_curr_ptr - + (uintptr_t)adev->umsch_mm.cmd_buf_ptr), + }; + + return psp_execute_ip_fw_load(&adev->psp, &ucode); +} + +static void umsch_mm_agdb_index_init(struct amdgpu_device *adev) +{ + uint32_t umsch_mm_agdb_start; + int i; + + umsch_mm_agdb_start = adev->doorbell_index.max_assignment + 1; + umsch_mm_agdb_start = roundup(umsch_mm_agdb_start, 1024); + umsch_mm_agdb_start += (AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1); + + for (i = 0; i < CONTEXT_PRIORITY_NUM_LEVELS; i++) + adev->umsch_mm.agdb_index[i] = umsch_mm_agdb_start + i; +} + +static int umsch_mm_init(struct amdgpu_device *adev) +{ + int r; + + adev->umsch_mm.vmid_mask_mm_vpe = 0xf00; + adev->umsch_mm.engine_mask = (1 << UMSCH_SWIP_ENGINE_TYPE_VPE); + adev->umsch_mm.vpe_hqd_mask = 0xfe; + + r = amdgpu_device_wb_get(adev, &adev->umsch_mm.wb_index); + if (r) { + dev_err(adev->dev, "failed to alloc wb for umsch: %d\n", r); + return r; + } + + adev->umsch_mm.sch_ctx_gpu_addr = adev->wb.gpu_addr + + (adev->umsch_mm.wb_index * 4); + + r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &adev->umsch_mm.cmd_buf_obj, + &adev->umsch_mm.cmd_buf_gpu_addr, + (void **)&adev->umsch_mm.cmd_buf_ptr); + if (r) { + dev_err(adev->dev, "failed to allocate cmdbuf bo %d\n", r); + amdgpu_device_wb_free(adev, adev->umsch_mm.wb_index); + return r; + } + + mutex_init(&adev->umsch_mm.mutex_hidden); + + umsch_mm_agdb_index_init(adev); + + return 0; +} + + +static int umsch_mm_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) { + case IP_VERSION(4, 0, 5): + umsch_mm_v4_0_set_funcs(&adev->umsch_mm); + break; + default: + return -EINVAL; + } + + adev->umsch_mm.ring.funcs = &umsch_v4_0_ring_funcs; + umsch_mm_set_regs(&adev->umsch_mm); + + return 0; +} + +static int umsch_mm_late_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return umsch_mm_test(adev); +} + +static int umsch_mm_sw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = umsch_mm_init(adev); + if (r) + return r; + + r = umsch_mm_ring_init(&adev->umsch_mm); + if (r) + return r; + + r = umsch_mm_init_microcode(&adev->umsch_mm); + if (r) + return r; + + return 0; +} + +static int umsch_mm_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + release_firmware(adev->umsch_mm.fw); + adev->umsch_mm.fw = NULL; + + amdgpu_ring_fini(&adev->umsch_mm.ring); + + mutex_destroy(&adev->umsch_mm.mutex_hidden); + + amdgpu_bo_free_kernel(&adev->umsch_mm.cmd_buf_obj, + &adev->umsch_mm.cmd_buf_gpu_addr, + (void **)&adev->umsch_mm.cmd_buf_ptr); + + amdgpu_device_wb_free(adev, adev->umsch_mm.wb_index); + + return 0; +} + +static int umsch_mm_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = umsch_mm_load_microcode(&adev->umsch_mm); + if (r) + return r; + + umsch_mm_ring_start(&adev->umsch_mm); + + r = umsch_mm_set_hw_resources(&adev->umsch_mm); + if (r) + return r; + + return 0; +} + +static int 
umsch_mm_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + umsch_mm_ring_stop(&adev->umsch_mm); + + amdgpu_bo_free_kernel(&adev->umsch_mm.data_fw_obj, + &adev->umsch_mm.data_fw_gpu_addr, + (void **)&adev->umsch_mm.data_fw_ptr); + + amdgpu_bo_free_kernel(&adev->umsch_mm.ucode_fw_obj, + &adev->umsch_mm.ucode_fw_gpu_addr, + (void **)&adev->umsch_mm.ucode_fw_ptr); + return 0; +} + +static int umsch_mm_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return umsch_mm_hw_fini(adev); +} + +static int umsch_mm_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return umsch_mm_hw_init(adev); +} + +static const struct amd_ip_funcs umsch_mm_v4_0_ip_funcs = { + .name = "umsch_mm_v4_0", + .early_init = umsch_mm_early_init, + .late_init = umsch_mm_late_init, + .sw_init = umsch_mm_sw_init, + .sw_fini = umsch_mm_sw_fini, + .hw_init = umsch_mm_hw_init, + .hw_fini = umsch_mm_hw_fini, + .suspend = umsch_mm_suspend, + .resume = umsch_mm_resume, +}; + +const struct amdgpu_ip_block_version umsch_mm_v4_0_ip_block = { + .type = AMD_IP_BLOCK_TYPE_UMSCH_MM, + .major = 4, + .minor = 0, + .rev = 0, + .funcs = &umsch_mm_v4_0_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h new file mode 100644 index 000000000..8258a43a6 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __AMDGPU_UMSCH_MM_H__ +#define __AMDGPU_UMSCH_MM_H__ + +enum UMSCH_SWIP_ENGINE_TYPE { + UMSCH_SWIP_ENGINE_TYPE_VCN0 = 0, + UMSCH_SWIP_ENGINE_TYPE_VCN1 = 1, + UMSCH_SWIP_ENGINE_TYPE_VCN = 2, + UMSCH_SWIP_ENGINE_TYPE_VPE = 3, + UMSCH_SWIP_ENGINE_TYPE_MAX +}; + +enum UMSCH_SWIP_AFFINITY_TYPE { + UMSCH_SWIP_AFFINITY_TYPE_ANY = 0, + UMSCH_SWIP_AFFINITY_TYPE_VCN0 = 1, + UMSCH_SWIP_AFFINITY_TYPE_VCN1 = 2, + UMSCH_SWIP_AFFINITY_TYPE_MAX +}; + +enum UMSCH_CONTEXT_PRIORITY_LEVEL { + CONTEXT_PRIORITY_LEVEL_IDLE = 0, + CONTEXT_PRIORITY_LEVEL_NORMAL = 1, + CONTEXT_PRIORITY_LEVEL_FOCUS = 2, + CONTEXT_PRIORITY_LEVEL_REALTIME = 3, + CONTEXT_PRIORITY_NUM_LEVELS +}; + +struct umsch_mm_set_resource_input { + uint32_t vmid_mask_mm_vcn; + uint32_t vmid_mask_mm_vpe; + uint32_t logging_vmid; + uint32_t engine_mask; + union { + struct { + uint32_t disable_reset : 1; + uint32_t disable_umsch_mm_log : 1; + uint32_t reserved : 30; + }; + uint32_t uint32_all; + }; +}; + +struct umsch_mm_add_queue_input { + uint32_t process_id; + uint64_t page_table_base_addr; + uint64_t process_va_start; + uint64_t process_va_end; + uint64_t process_quantum; + uint64_t process_csa_addr; + uint64_t context_quantum; + uint64_t context_csa_addr; + uint32_t inprocess_context_priority; + enum UMSCH_CONTEXT_PRIORITY_LEVEL context_global_priority_level; + uint32_t doorbell_offset_0; + uint32_t doorbell_offset_1; + enum UMSCH_SWIP_ENGINE_TYPE engine_type; + uint32_t affinity; + enum UMSCH_SWIP_AFFINITY_TYPE affinity_type; + uint64_t mqd_addr; + uint64_t h_context; + uint64_t h_queue; + uint32_t vm_context_cntl; + + struct { + uint32_t is_context_suspended : 1; + uint32_t reserved : 31; + }; +}; + +struct umsch_mm_remove_queue_input { + uint32_t doorbell_offset_0; + uint32_t doorbell_offset_1; + uint64_t context_csa_addr; +}; + +struct MQD_INFO { + uint32_t rb_base_hi; + uint32_t rb_base_lo; + uint32_t rb_size; + uint32_t wptr_val; + uint32_t rptr_val; + uint32_t unmapped; +}; + +struct amdgpu_umsch_mm; + +struct umsch_mm_funcs { + int (*set_hw_resources)(struct amdgpu_umsch_mm *umsch); + int (*add_queue)(struct amdgpu_umsch_mm *umsch, + struct umsch_mm_add_queue_input *input); + int (*remove_queue)(struct amdgpu_umsch_mm *umsch, + struct umsch_mm_remove_queue_input *input); + int (*set_regs)(struct amdgpu_umsch_mm *umsch); + int (*init_microcode)(struct amdgpu_umsch_mm *umsch); + int (*load_microcode)(struct amdgpu_umsch_mm *umsch); + int (*ring_init)(struct amdgpu_umsch_mm *umsch); + int (*ring_start)(struct amdgpu_umsch_mm *umsch); + int (*ring_stop)(struct amdgpu_umsch_mm *umsch); + int (*ring_fini)(struct amdgpu_umsch_mm *umsch); +}; + +struct amdgpu_umsch_mm { + struct amdgpu_ring ring; + + uint32_t rb_wptr; + uint32_t rb_rptr; + + const struct umsch_mm_funcs *funcs; + + const struct firmware *fw; + uint32_t fw_version; + uint32_t feature_version; + + struct amdgpu_bo *ucode_fw_obj; + uint64_t ucode_fw_gpu_addr; + uint32_t *ucode_fw_ptr; + uint64_t irq_start_addr; + uint64_t uc_start_addr; + uint32_t ucode_size; + + struct amdgpu_bo *data_fw_obj; + uint64_t data_fw_gpu_addr; + uint32_t *data_fw_ptr; + uint64_t data_start_addr; + uint32_t data_size; + + struct amdgpu_bo *cmd_buf_obj; + uint64_t cmd_buf_gpu_addr; + uint32_t *cmd_buf_ptr; + uint32_t *cmd_buf_curr_ptr; + + uint32_t wb_index; + uint64_t sch_ctx_gpu_addr; + uint32_t *sch_ctx_cpu_addr; + + uint32_t vmid_mask_mm_vcn; + uint32_t vmid_mask_mm_vpe; + uint32_t engine_mask; + uint32_t vcn0_hqd_mask; + uint32_t vcn1_hqd_mask; + uint32_t vcn_hqd_mask[2]; + uint32_t 
vpe_hqd_mask; + uint32_t agdb_index[CONTEXT_PRIORITY_NUM_LEVELS]; + + struct mutex mutex_hidden; +}; + +int amdgpu_umsch_mm_submit_pkt(struct amdgpu_umsch_mm *umsch, void *pkt, int ndws); +int amdgpu_umsch_mm_query_fence(struct amdgpu_umsch_mm *umsch); + +int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch); +int amdgpu_umsch_mm_allocate_ucode_buffer(struct amdgpu_umsch_mm *umsch); +int amdgpu_umsch_mm_allocate_ucode_data_buffer(struct amdgpu_umsch_mm *umsch); + +int amdgpu_umsch_mm_psp_execute_cmd_buf(struct amdgpu_umsch_mm *umsch); + +int amdgpu_umsch_mm_ring_init(struct amdgpu_umsch_mm *umsch); + +#define WREG32_SOC15_UMSCH(reg, value) \ + do { \ + uint32_t reg_offset = adev->reg_offset[VCN_HWIP][0][reg##_BASE_IDX] + reg; \ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { \ + *adev->umsch_mm.cmd_buf_curr_ptr++ = (reg_offset << 2); \ + *adev->umsch_mm.cmd_buf_curr_ptr++ = value; \ + } else { \ + WREG32(reg_offset, value); \ + } \ + } while (0) + +#define umsch_mm_set_hw_resources(umsch) \ + ((umsch)->funcs->set_hw_resources ? (umsch)->funcs->set_hw_resources((umsch)) : 0) +#define umsch_mm_add_queue(umsch, input) \ + ((umsch)->funcs->add_queue ? (umsch)->funcs->add_queue((umsch), (input)) : 0) +#define umsch_mm_remove_queue(umsch, input) \ + ((umsch)->funcs->remove_queue ? (umsch)->funcs->remove_queue((umsch), (input)) : 0) + +#define umsch_mm_set_regs(umsch) \ + ((umsch)->funcs->set_regs ? (umsch)->funcs->set_regs((umsch)) : 0) +#define umsch_mm_init_microcode(umsch) \ + ((umsch)->funcs->init_microcode ? (umsch)->funcs->init_microcode((umsch)) : 0) +#define umsch_mm_load_microcode(umsch) \ + ((umsch)->funcs->load_microcode ? (umsch)->funcs->load_microcode((umsch)) : 0) + +#define umsch_mm_ring_init(umsch) \ + ((umsch)->funcs->ring_init ? (umsch)->funcs->ring_init((umsch)) : 0) +#define umsch_mm_ring_start(umsch) \ + ((umsch)->funcs->ring_start ? (umsch)->funcs->ring_start((umsch)) : 0) +#define umsch_mm_ring_stop(umsch) \ + ((umsch)->funcs->ring_stop ? (umsch)->funcs->ring_stop((umsch)) : 0) +#define umsch_mm_ring_fini(umsch) \ + ((umsch)->funcs->ring_fini ? (umsch)->funcs->ring_fini((umsch)) : 0) + +static inline void amdgpu_umsch_mm_lock(struct amdgpu_umsch_mm *umsch) +{ + mutex_lock(&umsch->mutex_hidden); +} + +static inline void amdgpu_umsch_mm_unlock(struct amdgpu_umsch_mm *umsch) +{ + mutex_unlock(&umsch->mutex_hidden); +} + +extern const struct amdgpu_ip_block_version umsch_mm_v4_0_ip_block; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index b7441654e..07d930339 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -398,32 +398,32 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) * amdgpu_uvd_entity_init - init entity * * @adev: amdgpu_device pointer + * @ring: amdgpu_ring pointer to check * + * Initialize the entity used for handle management in the kernel driver. 
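+ * The entity is created only when @ring is the first UVD ring; for other rings this is a no-op.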
*/ -int amdgpu_uvd_entity_init(struct amdgpu_device *adev) +int amdgpu_uvd_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - struct amdgpu_ring *ring; - struct drm_gpu_scheduler *sched; - int r; + if (ring == &adev->uvd.inst[0].ring) { + struct drm_gpu_scheduler *sched = &ring->sched; + int r; - ring = &adev->uvd.inst[0].ring; - sched = &ring->sched; - r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL, - &sched, 1, NULL); - if (r) { - DRM_ERROR("Failed setting up UVD kernel entity.\n"); - return r; + r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL, + &sched, 1, NULL); + if (r) { + DRM_ERROR("Failed setting up UVD kernel entity.\n"); + return r; + } } return 0; } -int amdgpu_uvd_suspend(struct amdgpu_device *adev) +int amdgpu_uvd_prepare_suspend(struct amdgpu_device *adev) { unsigned int size; void *ptr; int i, j, idx; - bool in_ras_intr = amdgpu_ras_intr_triggered(); cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -452,7 +452,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) if (drm_dev_enter(adev_to_drm(adev), &idx)) { /* re-write 0 since err_event_athub will corrupt VCPU buffer */ - if (in_ras_intr) + if (amdgpu_ras_intr_triggered()) memset(adev->uvd.inst[j].saved_bo, 0, size); else memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); @@ -461,7 +461,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) } } - if (in_ras_intr) + return 0; +} + +int amdgpu_uvd_suspend(struct amdgpu_device *adev) +{ + if (amdgpu_ras_intr_triggered()) DRM_WARN("UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n"); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index 9f89bb7cd..9dfad2f48 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -73,7 +73,8 @@ struct amdgpu_uvd { int amdgpu_uvd_sw_init(struct amdgpu_device *adev); int amdgpu_uvd_sw_fini(struct amdgpu_device *adev); -int amdgpu_uvd_entity_init(struct amdgpu_device *adev); +int amdgpu_uvd_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring); +int amdgpu_uvd_prepare_suspend(struct amdgpu_device *adev); int amdgpu_uvd_suspend(struct amdgpu_device *adev); int amdgpu_uvd_resume(struct amdgpu_device *adev); int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 1904edf68..59acf424a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -230,21 +230,22 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) * amdgpu_vce_entity_init - init entity * * @adev: amdgpu_device pointer + * @ring: amdgpu_ring pointer to check * + * Initialize the entity used for handle management in the kernel driver. 
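+ * The entity is created only when @ring is the first VCE ring; for other rings this is a no-op.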
*/ -int amdgpu_vce_entity_init(struct amdgpu_device *adev) +int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - struct amdgpu_ring *ring; - struct drm_gpu_scheduler *sched; - int r; - - ring = &adev->vce.ring[0]; - sched = &ring->sched; - r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL, - &sched, 1, NULL); - if (r != 0) { - DRM_ERROR("Failed setting up VCE run queue.\n"); - return r; + if (ring == &adev->vce.ring[0]) { + struct drm_gpu_scheduler *sched = &ring->sched; + int r; + + r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL, + &sched, 1, NULL); + if (r != 0) { + DRM_ERROR("Failed setting up VCE run queue.\n"); + return r; + } } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index ea680fc9a..6e53f872d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -55,7 +55,7 @@ struct amdgpu_vce { int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size); int amdgpu_vce_sw_fini(struct amdgpu_device *adev); -int amdgpu_vce_entity_init(struct amdgpu_device *adev); +int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring); int amdgpu_vce_suspend(struct amdgpu_device *adev); int amdgpu_vce_resume(struct amdgpu_device *adev); void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 03b4bcfca..f4963330c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -58,6 +58,7 @@ #define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin" #define FIRMWARE_VCN4_0_3 "amdgpu/vcn_4_0_3.bin" #define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin" +#define FIRMWARE_VCN4_0_5 "amdgpu/vcn_4_0_5.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN); MODULE_FIRMWARE(FIRMWARE_PICASSO); @@ -80,6 +81,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_0); MODULE_FIRMWARE(FIRMWARE_VCN4_0_2); MODULE_FIRMWARE(FIRMWARE_VCN4_0_3); MODULE_FIRMWARE(FIRMWARE_VCN4_0_4); +MODULE_FIRMWARE(FIRMWARE_VCN4_0_5); static void amdgpu_vcn_idle_work_handler(struct work_struct *work); @@ -124,7 +126,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) * Hence, check for these versions here - notice this is * restricted to Vangogh (Deck's APU). 
*/ - if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 2)) { + if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 0, 2)) { const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION); if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) || @@ -169,7 +171,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); - if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)) { + if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) { fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)); log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log); } else { @@ -265,7 +267,7 @@ static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; bool ret = false; - if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)) + if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) ret = true; return ret; @@ -1003,7 +1005,7 @@ int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout) struct amdgpu_device *adev = ring->adev; long r; - if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(4, 0, 3)) { + if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) { r = amdgpu_vcn_enc_ring_test_ib(ring, timeout); if (r) goto error; @@ -1053,7 +1055,8 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev) adev->firmware.fw_size += ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(4, 0, 3)) + if (amdgpu_ip_version(adev, UVD_HWIP, 0) == + IP_VERSION(4, 0, 3)) break; } dev_info(adev->dev, "Will use PSP to load VCN firmware\n"); @@ -1094,7 +1097,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf, if (write_pos > read_pos) { available = write_pos - read_pos; - read_num[0] = min(size, (size_t)available); + read_num[0] = min_t(size_t, size, available); } else { read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos; available = read_num[0] + write_pos - plog->header_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index a3eed90b6..514c98ea1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -33,7 +33,7 @@ #define AMDGPU_VCN_MAX_ENC_RINGS 3 #define AMDGPU_MAX_VCN_INSTANCES 4 -#define AMDGPU_MAX_VCN_ENC_RINGS AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES +#define AMDGPU_MAX_VCN_ENC_RINGS (AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES) #define AMDGPU_VCN_HARVEST_VCN0 (1 << 0) #define AMDGPU_VCN_HARVEST_VCN1 (1 << 1) @@ -169,6 +169,9 @@ #define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1 << 11) #define AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG (1 << 11) #define AMDGPU_VCN_VF_RB_SETUP_FLAG (1 << 14) +#define AMDGPU_VCN_VF_RB_DECOUPLE_FLAG (1 << 15) + +#define MAX_NUM_VCN_RB_SETUP 4 #define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER 0x00000001 #define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER 0x00000001 @@ -335,15 +338,30 @@ struct amdgpu_fw_shared { struct amdgpu_fw_shared_smu_interface_info smu_interface_info; }; +struct amdgpu_vcn_rb_setup_info { + uint32_t rb_addr_lo; + uint32_t rb_addr_hi; + uint32_t rb_size; +}; + struct amdgpu_fw_shared_rb_setup { uint32_t is_rb_enabled_flags; - uint32_t rb_addr_lo; - uint32_t rb_addr_hi; - uint32_t rb_size; - uint32_t rb4_addr_lo; - uint32_t rb4_addr_hi; - uint32_t rb4_size; - uint32_t reserved[6]; + + union { + struct { + uint32_t rb_addr_lo; + uint32_t rb_addr_hi; + uint32_t rb_size; 
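+ /* the legacy fixed layout continues with a second (rb4) ring buffer */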
+ uint32_t rb4_addr_lo; + uint32_t rb4_addr_hi; + uint32_t rb4_size; + uint32_t reserved[6]; + }; + + struct { + struct amdgpu_vcn_rb_setup_info rb_info[MAX_NUM_VCN_RB_SETUP]; + }; + }; }; struct amdgpu_fw_shared_drm_key_wa { @@ -351,6 +369,11 @@ struct amdgpu_fw_shared_drm_key_wa { uint8_t reserved[3]; }; +struct amdgpu_fw_shared_queue_decouple { + uint8_t is_enabled; + uint8_t reserved[7]; +}; + struct amdgpu_vcn4_fw_shared { uint32_t present_flag_0; uint8_t pad[12]; @@ -361,6 +384,8 @@ struct amdgpu_vcn4_fw_shared { struct amdgpu_fw_shared_rb_setup rb_setup; struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface; struct amdgpu_fw_shared_drm_key_wa drm_key_wa; + uint8_t pad3[9]; + struct amdgpu_fw_shared_queue_decouple decouple; }; struct amdgpu_vcn_fwlog { @@ -378,6 +403,15 @@ struct amdgpu_vcn_decode_buffer { uint32_t pad[30]; }; +struct amdgpu_vcn_rb_metadata { + uint32_t size; + uint32_t present_flag_0; + + uint8_t version; + uint8_t ring_id; + uint8_t pad[26]; +}; + #define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80 #define VCN_BLOCK_DECODE_DISABLE_MASK 0x40 #define VCN_BLOCK_QUEUE_DISABLE_MASK 0xC0 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 96857ae7f..3a632c3b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -73,9 +73,10 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev) void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t reg1, - uint32_t ref, uint32_t mask) + uint32_t ref, uint32_t mask, + uint32_t xcc_inst) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_inst]; struct amdgpu_ring *ring = &kiq->ring; signed long r, cnt = 0; unsigned long flags; @@ -837,7 +838,7 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad void amdgpu_virt_post_reset(struct amdgpu_device *adev) { - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3)) { + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) { /* force set to GFXOFF state after reset, * to avoid some invalid operation before GC enable */ @@ -847,7 +848,7 @@ void amdgpu_virt_post_reset(struct amdgpu_device *adev) bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id) { - switch (adev->ip_versions[MP0_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { case IP_VERSION(13, 0, 0): /* no vf autoload, white list */ if (ucode_id == AMDGPU_UCODE_ID_VCN1 || @@ -942,7 +943,7 @@ void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev, } } -static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, +bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip, bool write, u32 *rlcg_flag) { @@ -975,7 +976,7 @@ static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, return ret; } -static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id) +u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id) { struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; uint32_t timeout = 50000; @@ -1093,3 +1094,13 @@ u32 amdgpu_sriov_rreg(struct amdgpu_device *adev, else return RREG32(offset); } + +bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev) +{ + bool xnack_mode = true; + + if (amdgpu_sriov_vf(adev) && adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) + xnack_mode = false; + + return xnack_mode; +} 
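A note on the helper just added: amdgpu_sriov_xnack_support() returns false only for SR-IOV VFs on GC 9.4.2, where retry faults (XNACK) cannot be handled. A minimal sketch of how a caller might gate XNACK on it; pick_xnack_mode() is illustrative only and not part of this patch (the real call sites live in the KFD process setup):

/* Sketch, not part of the patch: decide the effective XNACK mode for a
 * process, honouring the SR-IOV restriction checked by the new helper. */
static bool pick_xnack_mode(struct amdgpu_device *adev, bool requested)
{
	/* VFs on GC 9.4.2 cannot recover retry faults, so force XNACK off */
	if (!amdgpu_sriov_xnack_support(adev))
		return false;

	return requested;
}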
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index fabb83e9d..d4207e441 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -126,6 +126,8 @@ enum AMDGIM_FEATURE_FLAG { AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5), /* AV1 Support MODE*/ AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6), + /* VCN RB decouple */ + AMDGIM_FEATURE_VCN_RB_DECOUPLE = (1 << 7), }; enum AMDGIM_REG_ACCESS_FLAG { @@ -326,11 +328,14 @@ static inline bool is_virtual_machine(void) ((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug)) #define amdgpu_sriov_is_av1_support(adev) \ ((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT) +#define amdgpu_sriov_is_vcn_rb_decouple(adev) \ + ((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE) bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t rreg1, - uint32_t ref, uint32_t mask); + uint32_t ref, uint32_t mask, + uint32_t xcc_inst); int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init); int amdgpu_virt_reset_gpu(struct amdgpu_device *adev); @@ -361,4 +366,9 @@ u32 amdgpu_sriov_rreg(struct amdgpu_device *adev, bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id); void amdgpu_virt_post_reset(struct amdgpu_device *adev); +bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev); +bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, + u32 acc_flags, u32 hwip, + bool write, u32 *rlcg_flag); +u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 9fe1278fd..5baefb548 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -845,6 +845,7 @@ static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence, * @immediate: immediate submission in a page fault * @unlocked: unlocked invalidation during MM callback * @flush_tlb: trigger tlb invalidation after update completed + * @allow_override: change MTYPE for local NUMA nodes * @resv: fences we need to sync to * @start: start of mapped range * @last: last mapped entry @@ -861,7 +862,7 @@ static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence, * 0 for success, negative erro code for failure. */ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, - bool immediate, bool unlocked, bool flush_tlb, + bool immediate, bool unlocked, bool flush_tlb, bool allow_override, struct dma_resv *resv, uint64_t start, uint64_t last, uint64_t flags, uint64_t offset, uint64_t vram_base, struct ttm_resource *res, dma_addr_t *pages_addr, @@ -886,12 +887,12 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, * heavy-weight flush TLB unconditionally. 
*/ flush_tlb |= adev->gmc.xgmi.num_physical_nodes && - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0); + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0); /* * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB */ - flush_tlb |= adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 0); + flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0); memset(¶ms, 0, sizeof(params)); params.adev = adev; @@ -899,6 +900,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, params.immediate = immediate; params.pages_addr = pages_addr; params.unlocked = unlocked; + params.allow_override = allow_override; /* Implicitly sync to command submissions in the same VM before * unmapping. Sync to moving fences before mapping. @@ -1074,6 +1076,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, struct ttm_resource *mem; struct dma_fence **last_update; bool flush_tlb = clear; + bool uncached; struct dma_resv *resv; uint64_t vram_base; uint64_t flags; @@ -1111,9 +1114,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bo_adev = amdgpu_ttm_adev(bo->tbo.bdev); vram_base = bo_adev->vm_manager.vram_base_offset; + uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0; } else { flags = 0x0; vram_base = 0; + uncached = false; } if (clear || (bo && bo->tbo.base.resv == @@ -1147,7 +1152,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, trace_amdgpu_vm_bo_update(mapping); r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, - resv, mapping->start, mapping->last, + !uncached, resv, mapping->start, mapping->last, update_flags, mapping->offset, vram_base, mem, pages_addr, last_update); @@ -1342,8 +1347,8 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, mapping->start < AMDGPU_GMC_HOLE_START) init_pte_value = AMDGPU_PTE_DEFAULT_ATC; - r = amdgpu_vm_update_range(adev, vm, false, false, true, resv, - mapping->start, mapping->last, + r = amdgpu_vm_update_range(adev, vm, false, false, true, false, + resv, mapping->start, mapping->last, init_pte_value, 0, 0, NULL, NULL, &f); amdgpu_vm_free_mapping(adev, vm, mapping, f); @@ -1369,6 +1374,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * @vm: requested vm + * @ticket: optional reservation ticket used to reserve the VM * * Make sure all BOs which are moved are updated in the PTs. * @@ -1378,11 +1384,12 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, * PTs have to be reserved! 
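+ * If @ticket matches the reservation context already held on a BO, its page tables are updated without re-taking the reservation lock.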
*/ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, - struct amdgpu_vm *vm) + struct amdgpu_vm *vm, + struct ww_acquire_ctx *ticket) { struct amdgpu_bo_va *bo_va; struct dma_resv *resv; - bool clear; + bool clear, unlock; int r; spin_lock(&vm->status_lock); @@ -1405,17 +1412,24 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, spin_unlock(&vm->status_lock); /* Try to reserve the BO to avoid clearing its ptes */ - if (!amdgpu_vm_debug && dma_resv_trylock(resv)) + if (!adev->debug_vm && dma_resv_trylock(resv)) { clear = false; + unlock = true; + /* The caller is already holding the reservation lock */ + } else if (ticket && dma_resv_locking_ctx(resv) == ticket) { + clear = false; + unlock = false; /* Somebody else is using the BO right now */ - else + } else { clear = true; + unlock = false; + } r = amdgpu_vm_bo_update(adev, bo_va, clear); if (r) return r; - if (!clear) + if (unlock) dma_resv_unlock(resv); spin_lock(&vm->status_lock); } @@ -2061,7 +2075,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, if (amdgpu_vm_block_size != -1) tmp >>= amdgpu_vm_block_size - 9; tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1; - adev->vm_manager.num_level = min(max_level, (unsigned)tmp); + adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp); switch (adev->vm_manager.num_level) { case 3: adev->vm_manager.root_level = AMDGPU_VM_PDB2; @@ -2620,8 +2634,8 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, goto error_unlock; } - r = amdgpu_vm_update_range(adev, vm, true, false, false, NULL, addr, - addr, flags, value, 0, NULL, NULL, NULL); + r = amdgpu_vm_update_range(adev, vm, true, false, false, false, + NULL, addr, addr, flags, value, 0, NULL, NULL, NULL); if (r) goto error_unlock; @@ -2733,3 +2747,53 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) total_done_objs); } #endif + +/** + * amdgpu_vm_update_fault_cache - update cached fault info. + * @adev: amdgpu device pointer + * @pasid: PASID of the VM + * @addr: Address of the fault + * @status: GPUVM fault status register + * @vmhub: which vmhub got the fault + * + * Cache the fault info for later use by userspace in debugging. + */ +void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev, + unsigned int pasid, + uint64_t addr, + uint32_t status, + unsigned int vmhub) +{ + struct amdgpu_vm *vm; + unsigned long flags; + + xa_lock_irqsave(&adev->vm_manager.pasids, flags); + + vm = xa_load(&adev->vm_manager.pasids, pasid); + /* Don't update the fault cache if status is 0. In the multiple + * fault case, subsequent faults will return a 0 status which is + * useless for userspace and replaces the useful fault status, so + * only update if status is non-0.
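+ * The pasids xarray lock taken here also keeps the VM from being freed while the cache is written.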
+ */ + if (vm && status) { + vm->fault_info.addr = addr; + vm->fault_info.status = status; + if (AMDGPU_IS_GFXHUB(vmhub)) { + vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX; + vm->fault_info.vmhub |= + (vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT; + } else if (AMDGPU_IS_MMHUB0(vmhub)) { + vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0; + vm->fault_info.vmhub |= + (vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT; + } else if (AMDGPU_IS_MMHUB1(vmhub)) { + vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1; + vm->fault_info.vmhub |= + (vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT; + } else { + WARN_ONCE(1, "Invalid vmhub %u\n", vmhub); + } + } + xa_unlock_irqrestore(&adev->vm_manager.pasids, flags); +} + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 204ab1318..2cd86d2bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -124,9 +124,16 @@ struct amdgpu_mem_stats; * layout: max 8 GFXHUB + 4 MMHUB0 + 1 MMHUB1 */ #define AMDGPU_MAX_VMHUBS 13 -#define AMDGPU_GFXHUB(x) (x) -#define AMDGPU_MMHUB0(x) (8 + x) -#define AMDGPU_MMHUB1(x) (8 + 4 + x) +#define AMDGPU_GFXHUB_START 0 +#define AMDGPU_MMHUB0_START 8 +#define AMDGPU_MMHUB1_START 12 +#define AMDGPU_GFXHUB(x) (AMDGPU_GFXHUB_START + (x)) +#define AMDGPU_MMHUB0(x) (AMDGPU_MMHUB0_START + (x)) +#define AMDGPU_MMHUB1(x) (AMDGPU_MMHUB1_START + (x)) + +#define AMDGPU_IS_GFXHUB(x) ((x) >= AMDGPU_GFXHUB_START && (x) < AMDGPU_MMHUB0_START) +#define AMDGPU_IS_MMHUB0(x) ((x) >= AMDGPU_MMHUB0_START && (x) < AMDGPU_MMHUB1_START) +#define AMDGPU_IS_MMHUB1(x) ((x) >= AMDGPU_MMHUB1_START && (x) < AMDGPU_MAX_VMHUBS) /* Reserve 2MB at top/bottom of address space for kernel use */ #define AMDGPU_VA_RESERVED_SIZE (2ULL << 20) @@ -239,6 +246,12 @@ struct amdgpu_vm_update_params { * @table_freed: return true if page table is freed when updating */ bool table_freed; + + /** + * @allow_override: true for memory that is not uncached: allows MTYPE + * to be overridden for NUMA local memory. + */ + bool allow_override; }; struct amdgpu_vm_update_funcs { @@ -252,6 +265,15 @@ struct amdgpu_vm_update_funcs { struct dma_fence **fence); }; +struct amdgpu_vm_fault_info { + /* fault address */ + uint64_t addr; + /* fault status register */ + uint32_t status; + /* which vmhub? gfxhub, mmhub, etc. 
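* packed as a hub type plus an instance index above AMDGPU_VMHUB_IDX_SHIFT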
*/ + unsigned int vmhub; +}; + struct amdgpu_vm { /* tree of virtual addresses mapped */ struct rb_root_cached va; @@ -343,6 +365,9 @@ struct amdgpu_vm { /* Memory partition number, -1 means any partition */ int8_t mem_id; + + /* cached fault info */ + struct amdgpu_vm_fault_info fault_info; }; struct amdgpu_vm_manager { @@ -418,11 +443,12 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence); int amdgpu_vm_handle_moved(struct amdgpu_device *adev, - struct amdgpu_vm *vm); + struct amdgpu_vm *vm, + struct ww_acquire_ctx *ticket); void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, struct amdgpu_vm *vm, struct amdgpu_bo *bo); int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, - bool immediate, bool unlocked, bool flush_tlb, + bool immediate, bool unlocked, bool flush_tlb, bool allow_override, struct dma_resv *resv, uint64_t start, uint64_t last, uint64_t flags, uint64_t offset, uint64_t vram_base, struct ttm_resource *res, dma_addr_t *pages_addr, @@ -554,4 +580,10 @@ static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm) mutex_unlock(&vm->eviction_lock); } +void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev, + unsigned int pasid, + uint64_t addr, + uint32_t status, + unsigned int vmhub); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 0d51222f6..a160265dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -844,14 +844,8 @@ static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params, */ if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) && adev->gmc.gmc_funcs->override_vm_pte_flags && - num_possible_nodes() > 1) { - if (!params->pages_addr) - amdgpu_gmc_override_vm_pte_flags(adev, params->vm, - addr, &flags); - else - dev_dbg(adev->dev, - "override_vm_pte_flags skipped: non-contiguous\n"); - } + num_possible_nodes() > 1 && !params->pages_addr && params->allow_override) + amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags); params->vm->update_funcs->update(params, pt, pe, addr, count, incr, flags); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c new file mode 100644 index 000000000..e81579708 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c @@ -0,0 +1,656 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/firmware.h> +#include <drm/drm_drv.h> + +#include "amdgpu.h" +#include "amdgpu_ucode.h" +#include "amdgpu_vpe.h" +#include "soc15_common.h" +#include "vpe_v6_1.h" + +#define AMDGPU_CSA_VPE_SIZE 64 +/* VPE CSA resides in the 4th page of CSA */ +#define AMDGPU_CSA_VPE_OFFSET (4096 * 3) + +static void vpe_set_ring_funcs(struct amdgpu_device *adev); + +int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev) +{ + struct amdgpu_firmware_info ucode = { + .ucode_id = AMDGPU_UCODE_ID_VPE, + .mc_addr = adev->vpe.cmdbuf_gpu_addr, + .ucode_size = 8, + }; + + return psp_execute_ip_fw_load(&adev->psp, &ucode); +} + +int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe) +{ + struct amdgpu_device *adev = vpe->ring.adev; + const struct vpe_firmware_header_v1_0 *vpe_hdr; + char fw_prefix[32], fw_name[64]; + int ret; + + amdgpu_ucode_ip_version_decode(adev, VPE_HWIP, fw_prefix, sizeof(fw_prefix)); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", fw_prefix); + + ret = amdgpu_ucode_request(adev, &adev->vpe.fw, fw_name); + if (ret) + goto out; + + vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data; + adev->vpe.fw_version = le32_to_cpu(vpe_hdr->header.ucode_version); + adev->vpe.feature_version = le32_to_cpu(vpe_hdr->ucode_feature_version); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + struct amdgpu_firmware_info *info; + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTX]; + info->ucode_id = AMDGPU_UCODE_ID_VPE_CTX; + info->fw = adev->vpe.fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTL]; + info->ucode_id = AMDGPU_UCODE_ID_VPE_CTL; + info->fw = adev->vpe.fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes), PAGE_SIZE); + } + + return 0; +out: + dev_err(adev->dev, "failed to initialize vpe microcode\n"); + release_firmware(adev->vpe.fw); + adev->vpe.fw = NULL; + return ret; +} + +int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe) +{ + struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe); + struct amdgpu_ring *ring = &vpe->ring; + int ret; + + ring->ring_obj = NULL; + ring->use_doorbell = true; + ring->vm_hub = AMDGPU_MMHUB0(0); + ring->doorbell_index = (adev->doorbell_index.vpe_ring << 1); + snprintf(ring->name, 4, "vpe"); + + ret = amdgpu_ring_init(adev, ring, 1024, &vpe->trap_irq, 0, + AMDGPU_RING_PRIO_DEFAULT, NULL); + if (ret) + return ret; + + return 0; +} + +int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe) +{ + amdgpu_ring_fini(&vpe->ring); + + return 0; +} + +static int vpe_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_vpe *vpe = &adev->vpe; + + switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) { + case IP_VERSION(6, 1, 0): + vpe_v6_1_set_funcs(vpe); + break; + default: + return -EINVAL; + } + + vpe_set_ring_funcs(adev); + vpe_set_regs(vpe); + + return 0; +} + + +static int vpe_common_init(struct amdgpu_vpe *vpe) +{ + struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe); + int r; + + r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &adev->vpe.cmdbuf_obj, + &adev->vpe.cmdbuf_gpu_addr, + (void **)&adev->vpe.cmdbuf_cpu_addr); + if (r) {
dev_err(adev->dev, "VPE: failed to allocate cmdbuf bo %d\n", r); + return r; + } + + return 0; +} + +static int vpe_sw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_vpe *vpe = &adev->vpe; + int ret; + + ret = vpe_common_init(vpe); + if (ret) + goto out; + + ret = vpe_irq_init(vpe); + if (ret) + goto out; + + ret = vpe_ring_init(vpe); + if (ret) + goto out; + + ret = vpe_init_microcode(vpe); + if (ret) + goto out; +out: + return ret; +} + +static int vpe_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_vpe *vpe = &adev->vpe; + + release_firmware(vpe->fw); + vpe->fw = NULL; + + vpe_ring_fini(vpe); + + amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj, + &adev->vpe.cmdbuf_gpu_addr, + (void **)&adev->vpe.cmdbuf_cpu_addr); + + return 0; +} + +static int vpe_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_vpe *vpe = &adev->vpe; + int ret; + + ret = vpe_load_microcode(vpe); + if (ret) + return ret; + + ret = vpe_ring_start(vpe); + if (ret) + return ret; + + return 0; +} + +static int vpe_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_vpe *vpe = &adev->vpe; + + vpe_ring_stop(vpe); + + return 0; +} + +static int vpe_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return vpe_hw_fini(adev); +} + +static int vpe_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return vpe_hw_init(adev); +} + +static void vpe_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) +{ + int i; + + for (i = 0; i < count; i++) + if (i == 0) + amdgpu_ring_write(ring, ring->funcs->nop | + VPE_CMD_NOP_HEADER_COUNT(count - 1)); + else + amdgpu_ring_write(ring, ring->funcs->nop); +} + +static uint64_t vpe_get_csa_mc_addr(struct amdgpu_ring *ring, uint32_t vmid) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t index = 0; + uint64_t csa_mc_addr; + + if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp) + return 0; + + csa_mc_addr = amdgpu_csa_vaddr(adev) + AMDGPU_CSA_VPE_OFFSET + + index * AMDGPU_CSA_VPE_SIZE; + + return csa_mc_addr; +} + +static void vpe_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + uint32_t flags) +{ + uint32_t vmid = AMDGPU_JOB_GET_VMID(job); + uint64_t csa_mc_addr = vpe_get_csa_mc_addr(ring, vmid); + + amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_INDIRECT, 0) | + VPE_CMD_INDIRECT_HEADER_VMID(vmid & 0xf)); + + /* base must be 32 byte aligned */ + amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, ib->length_dw); + amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr)); + amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr)); +} + +static void vpe_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr, + uint64_t seq, unsigned int flags) +{ + int i = 0; + + do { + /* write the fence */ + amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0)); + /* zero in first two bits */ + WARN_ON_ONCE(addr & 0x3); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, i == 0 ? 
lower_32_bits(seq) : upper_32_bits(seq)); + addr += 4; + } while ((flags & AMDGPU_FENCE_FLAG_64BIT) && (i++ < 1)); + + if (flags & AMDGPU_FENCE_FLAG_INT) { + /* generate an interrupt */ + amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_TRAP, 0)); + amdgpu_ring_write(ring, 0); + } + +} + +static void vpe_ring_emit_pipeline_sync(struct amdgpu_ring *ring) +{ + uint32_t seq = ring->fence_drv.sync_seq; + uint64_t addr = ring->fence_drv.gpu_addr; + + /* wait for idle */ + amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM, + VPE_POLL_REGMEM_SUBOP_REGMEM) | + VPE_CMD_POLL_REGMEM_HEADER_FUNC(3) | /* equal */ + VPE_CMD_POLL_REGMEM_HEADER_MEM(1)); + amdgpu_ring_write(ring, addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, seq); /* reference */ + amdgpu_ring_write(ring, 0xffffffff); /* mask */ + amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | + VPE_CMD_POLL_REGMEM_DW5_INTERVAL(4)); +} + +static void vpe_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) +{ + amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_REG_WRITE, 0)); + amdgpu_ring_write(ring, reg << 2); + amdgpu_ring_write(ring, val); +} + +static void vpe_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask) +{ + amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM, + VPE_POLL_REGMEM_SUBOP_REGMEM) | + VPE_CMD_POLL_REGMEM_HEADER_FUNC(3) | /* equal */ + VPE_CMD_POLL_REGMEM_HEADER_MEM(0)); + amdgpu_ring_write(ring, reg << 2); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, val); /* reference */ + amdgpu_ring_write(ring, mask); /* mask */ + amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | + VPE_CMD_POLL_REGMEM_DW5_INTERVAL(10)); +} + +static void vpe_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid, + uint64_t pd_addr) +{ + amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); +} + +static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring) +{ + unsigned int ret; + + amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0)); + amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr)); + amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr)); + amdgpu_ring_write(ring, 1); + ret = ring->wptr & ring->buf_mask;/* this is the offset we need patch later */ + amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */ + + return ret; +} + +static void vpe_ring_patch_cond_exec(struct amdgpu_ring *ring, unsigned int offset) +{ + unsigned int cur; + + WARN_ON_ONCE(offset > ring->buf_mask); + WARN_ON_ONCE(ring->ring[offset] != 0x55aa55aa); + + cur = (ring->wptr - 1) & ring->buf_mask; + if (cur > offset) + ring->ring[offset] = cur - offset; + else + ring->ring[offset] = (ring->buf_mask + 1) - offset + cur; +} + +static int vpe_ring_preempt_ib(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_vpe *vpe = &adev->vpe; + uint32_t preempt_reg = vpe->regs.queue0_preempt; + int i, r = 0; + + /* assert preemption condition */ + amdgpu_ring_set_preempt_cond_exec(ring, false); + + /* emit the trailing fence */ + ring->trail_seq += 1; + amdgpu_ring_alloc(ring, 10); + vpe_ring_emit_fence(ring, ring->trail_fence_gpu_addr, ring->trail_seq, 0); + amdgpu_ring_commit(ring); + + /* assert IB preemption */ + WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 1); + + /* poll the trailing fence */ + for (i = 0; i < adev->usec_timeout; i++) { + if (ring->trail_seq == + 
le32_to_cpu(*(ring->trail_fence_cpu_addr))) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) { + r = -EINVAL; + dev_err(adev->dev, "ring %d failed to be preempted\n", ring->idx); + } + + /* deassert IB preemption */ + WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 0); + + /* deassert the preemption condition */ + amdgpu_ring_set_preempt_cond_exec(ring, true); + + return r; +} + +static int vpe_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int vpe_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +static uint64_t vpe_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_vpe *vpe = &adev->vpe; + uint64_t rptr; + + if (ring->use_doorbell) { + rptr = atomic64_read((atomic64_t *)ring->rptr_cpu_addr); + dev_dbg(adev->dev, "rptr/doorbell before shift == 0x%016llx\n", rptr); + } else { + rptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_hi)); + rptr = rptr << 32; + rptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_lo)); + dev_dbg(adev->dev, "rptr before shift [%i] == 0x%016llx\n", ring->me, rptr); + } + + return (rptr >> 2); +} + +static uint64_t vpe_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_vpe *vpe = &adev->vpe; + uint64_t wptr; + + if (ring->use_doorbell) { + wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); + dev_dbg(adev->dev, "wptr/doorbell before shift == 0x%016llx\n", wptr); + } else { + wptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_hi)); + wptr = wptr << 32; + wptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_lo)); + dev_dbg(adev->dev, "wptr before shift [%i] == 0x%016llx\n", ring->me, wptr); + } + + return (wptr >> 2); +} + +static void vpe_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_vpe *vpe = &adev->vpe; + + if (ring->use_doorbell) { + dev_dbg(adev->dev, "Using doorbell, \ + wptr_offs == 0x%08x, \ + lower_32_bits(ring->wptr) << 2 == 0x%08x, \ + upper_32_bits(ring->wptr) << 2 == 0x%08x\n", + ring->wptr_offs, + lower_32_bits(ring->wptr << 2), + upper_32_bits(ring->wptr << 2)); + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr << 2); + WDOORBELL64(ring->doorbell_index, ring->wptr << 2); + } else { + dev_dbg(adev->dev, "Not using doorbell, \ + regVPEC_QUEUE0_RB_WPTR == 0x%08x, \ + regVPEC_QUEUE0_RB_WPTR_HI == 0x%08x\n", + lower_32_bits(ring->wptr << 2), + upper_32_bits(ring->wptr << 2)); + WREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_lo), + lower_32_bits(ring->wptr << 2)); + WREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_hi), + upper_32_bits(ring->wptr << 2)); + } +} + +static int vpe_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + const uint32_t test_pattern = 0xdeadbeef; + uint32_t index, i; + uint64_t wb_addr; + int ret; + + ret = amdgpu_device_wb_get(adev, &index); + if (ret) { + dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret); + return ret; + } + + adev->wb.wb[index] = 0; + wb_addr = adev->wb.gpu_addr + (index * 4); + + ret = amdgpu_ring_alloc(ring, 4); + if (ret) { + dev_err(adev->dev, "amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, ret); + goto out; + } + + amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0)); + amdgpu_ring_write(ring, lower_32_bits(wb_addr)); + 
amdgpu_ring_write(ring, upper_32_bits(wb_addr)); + amdgpu_ring_write(ring, test_pattern); + amdgpu_ring_commit(ring); + + for (i = 0; i < adev->usec_timeout; i++) { + if (le32_to_cpu(adev->wb.wb[index]) == test_pattern) + goto out; + udelay(1); + } + + ret = -ETIMEDOUT; +out: + amdgpu_device_wb_free(adev, index); + + return ret; +} + +static int vpe_ring_test_ib(struct amdgpu_ring *ring, long timeout) +{ + struct amdgpu_device *adev = ring->adev; + const uint32_t test_pattern = 0xdeadbeef; + struct amdgpu_ib ib = {}; + struct dma_fence *f = NULL; + uint32_t index; + uint64_t wb_addr; + int ret; + + ret = amdgpu_device_wb_get(adev, &index); + if (ret) { + dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret); + return ret; + } + + adev->wb.wb[index] = 0; + wb_addr = adev->wb.gpu_addr + (index * 4); + + ret = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib); + if (ret) + goto err0; + + ib.ptr[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0); + ib.ptr[1] = lower_32_bits(wb_addr); + ib.ptr[2] = upper_32_bits(wb_addr); + ib.ptr[3] = test_pattern; + ib.ptr[4] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0); + ib.ptr[5] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0); + ib.ptr[6] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0); + ib.ptr[7] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0); + ib.length_dw = 8; + + ret = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); + if (ret) + goto err1; + + ret = dma_fence_wait_timeout(f, false, timeout); + if (ret <= 0) { + ret = ret ? : -ETIMEDOUT; + goto err1; + } + + ret = (le32_to_cpu(adev->wb.wb[index]) == test_pattern) ? 0 : -EINVAL; + +err1: + amdgpu_ib_free(adev, &ib, NULL); + dma_fence_put(f); +err0: + amdgpu_device_wb_free(adev, index); + + return ret; +} + +static const struct amdgpu_ring_funcs vpe_ring_funcs = { + .type = AMDGPU_RING_TYPE_VPE, + .align_mask = 0xf, + .nop = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0), + .support_64bit_ptrs = true, + .get_rptr = vpe_ring_get_rptr, + .get_wptr = vpe_ring_get_wptr, + .set_wptr = vpe_ring_set_wptr, + .emit_frame_size = + 5 + /* vpe_ring_init_cond_exec */ + 6 + /* vpe_ring_emit_pipeline_sync */ + 10 + 10 + 10 + /* vpe_ring_emit_fence */ + /* vpe_ring_emit_vm_flush */ + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6, + .emit_ib_size = 7 + 6, + .emit_ib = vpe_ring_emit_ib, + .emit_pipeline_sync = vpe_ring_emit_pipeline_sync, + .emit_fence = vpe_ring_emit_fence, + .emit_vm_flush = vpe_ring_emit_vm_flush, + .emit_wreg = vpe_ring_emit_wreg, + .emit_reg_wait = vpe_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .insert_nop = vpe_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, + .test_ring = vpe_ring_test_ring, + .test_ib = vpe_ring_test_ib, + .init_cond_exec = vpe_ring_init_cond_exec, + .patch_cond_exec = vpe_ring_patch_cond_exec, + .preempt_ib = vpe_ring_preempt_ib, +}; + +static void vpe_set_ring_funcs(struct amdgpu_device *adev) +{ + adev->vpe.ring.funcs = &vpe_ring_funcs; +} + +const struct amd_ip_funcs vpe_ip_funcs = { + .name = "vpe_v6_1", + .early_init = vpe_early_init, + .late_init = NULL, + .sw_init = vpe_sw_init, + .sw_fini = vpe_sw_fini, + .hw_init = vpe_hw_init, + .hw_fini = vpe_hw_fini, + .suspend = vpe_suspend, + .resume = vpe_resume, + .soft_reset = NULL, + .set_clockgating_state = vpe_set_clockgating_state, + .set_powergating_state = vpe_set_powergating_state, +}; + +const struct amdgpu_ip_block_version vpe_v6_1_ip_block = { + .type = AMD_IP_BLOCK_TYPE_VPE, + .major = 6, + .minor = 1, + .rev = 0, + .funcs = &vpe_ip_funcs, +}; diff --git 
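vpe_ring_patch_cond_exec() above back-patches the 0x55aa55aa placeholder with the number of dwords the engine may skip, accounting for ring wrap-around. A hedged standalone sketch of just that modular distance calculation (the ring size is made up):

#include <stdint.h>
#include <stdio.h>

/* Distance from offset to cur on a power-of-two ring, mirroring the
 * arithmetic in vpe_ring_patch_cond_exec(); buf_mask = ring size - 1. */
static uint32_t ring_distance(uint32_t cur, uint32_t offset, uint32_t buf_mask)
{
        if (cur > offset)
                return cur - offset;
        /* wptr wrapped past the end of the buffer since init_cond_exec */
        return (buf_mask + 1) - offset + cur;
}

int main(void)
{
        const uint32_t buf_mask = 0xff; /* illustrative 256-dword ring */

        printf("%u\n", ring_distance(0x20, 0x10, buf_mask)); /* no wrap: 16 */
        printf("%u\n", ring_distance(0x08, 0xf0, buf_mask)); /* wrapped: 24 */
        return 0;
}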
a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h new file mode 100644 index 000000000..29d56f7ae --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h @@ -0,0 +1,91 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __AMDGPU_VPE_H__ +#define __AMDGPU_VPE_H__ + +#include "amdgpu_ring.h" +#include "amdgpu_irq.h" +#include "vpe_6_1_fw_if.h" + +struct amdgpu_vpe; + +struct vpe_funcs { + uint32_t (*get_reg_offset)(struct amdgpu_vpe *vpe, uint32_t inst, uint32_t offset); + int (*set_regs)(struct amdgpu_vpe *vpe); + int (*irq_init)(struct amdgpu_vpe *vpe); + int (*init_microcode)(struct amdgpu_vpe *vpe); + int (*load_microcode)(struct amdgpu_vpe *vpe); + int (*ring_init)(struct amdgpu_vpe *vpe); + int (*ring_start)(struct amdgpu_vpe *vpe); + int (*ring_stop)(struct amdgpu_vpe *vpe); + int (*ring_fini)(struct amdgpu_vpe *vpe); +}; + +struct vpe_regs { + uint32_t queue0_rb_rptr_lo; + uint32_t queue0_rb_rptr_hi; + uint32_t queue0_rb_wptr_lo; + uint32_t queue0_rb_wptr_hi; + uint32_t queue0_preempt; +}; + +struct amdgpu_vpe { + struct amdgpu_ring ring; + struct amdgpu_irq_src trap_irq; + + const struct vpe_funcs *funcs; + struct vpe_regs regs; + + const struct firmware *fw; + uint32_t fw_version; + uint32_t feature_version; + + struct amdgpu_bo *cmdbuf_obj; + uint64_t cmdbuf_gpu_addr; + uint32_t *cmdbuf_cpu_addr; +}; + +int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev); +int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe); +int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe); +int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe); + +#define vpe_ring_init(vpe) ((vpe)->funcs->ring_init ? (vpe)->funcs->ring_init((vpe)) : 0) +#define vpe_ring_start(vpe) ((vpe)->funcs->ring_start ? (vpe)->funcs->ring_start((vpe)) : 0) +#define vpe_ring_stop(vpe) ((vpe)->funcs->ring_stop ? (vpe)->funcs->ring_stop((vpe)) : 0) +#define vpe_ring_fini(vpe) ((vpe)->funcs->ring_fini ? (vpe)->funcs->ring_fini((vpe)) : 0) + +#define vpe_get_reg_offset(vpe, inst, offset) \ + ((vpe)->funcs->get_reg_offset ? (vpe)->funcs->get_reg_offset((vpe), (inst), (offset)) : 0) +#define vpe_set_regs(vpe) \ + ((vpe)->funcs->set_regs ? (vpe)->funcs->set_regs((vpe)) : 0) +#define vpe_irq_init(vpe) \ + ((vpe)->funcs->irq_init ? (vpe)->funcs->irq_init((vpe)) : 0) +#define vpe_init_microcode(vpe) \ + ((vpe)->funcs->init_microcode ? 
(vpe)->funcs->init_microcode((vpe)) : 0) +#define vpe_load_microcode(vpe) \ + ((vpe)->funcs->load_microcode ? (vpe)->funcs->load_microcode((vpe)) : 0) + +extern const struct amdgpu_ip_block_version vpe_v6_1_ip_block; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index c7085a747..8db880244 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -77,7 +77,16 @@ static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head) return true; } +static inline u64 amdgpu_vram_mgr_blocks_size(struct list_head *head) +{ + struct drm_buddy_block *block; + u64 size = 0; + list_for_each_entry(block, head, link) + size += amdgpu_vram_mgr_block_size(block); + + return size; +} /** * DOC: mem_info_vram_total @@ -212,8 +221,23 @@ static struct attribute *amdgpu_vram_mgr_attributes[] = { NULL }; +static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct device *dev = kobj_to_dev(kobj); + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (attr == &dev_attr_mem_info_vram_vendor.attr && + !adev->gmc.vram_vendor) + return 0; + + return attr->mode; +} + const struct attribute_group amdgpu_vram_mgr_attr_group = { - .attrs = amdgpu_vram_mgr_attributes + .attrs = amdgpu_vram_mgr_attributes, + .is_visible = amdgpu_vram_attrs_is_visible }; /** @@ -424,9 +448,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, const struct ttm_place *place, struct ttm_resource **res) { - u64 vis_usage = 0, max_bytes, cur_size, min_block_size; struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = to_amdgpu_device(mgr); + u64 vis_usage = 0, max_bytes, min_block_size; struct amdgpu_vram_mgr_resource *vres; u64 size, remaining_size, lpfn, fpfn; struct drm_buddy *mm = &mgr->mm; @@ -474,6 +498,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, if (place->flags & TTM_PL_FLAG_TOPDOWN) vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION; + if (place->flags & TTM_PL_FLAG_CONTIGUOUS) + vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION; + if (fpfn || lpfn != mgr->mm.size) /* Allocate blocks in desired range */ vres->flags |= DRM_BUDDY_RANGE_ALLOCATION; @@ -496,25 +523,6 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1))) min_block_size = (u64)pages_per_block << PAGE_SHIFT; - cur_size = size; - - if (fpfn + size != (u64)place->lpfn << PAGE_SHIFT) { - /* - * Except for actual range allocation, modify the size and - * min_block_size conforming to continuous flag enablement - */ - if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { - size = roundup_pow_of_two(size); - min_block_size = size; - /* - * Modify the size value if size is not - * aligned with min_block_size - */ - } else if (!IS_ALIGNED(size, min_block_size)) { - size = round_up(size, min_block_size); - } - } - r = drm_buddy_alloc_blocks(mm, fpfn, lpfn, size, @@ -531,41 +539,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, } mutex_unlock(&mgr->lock); - if (cur_size != size) { - struct drm_buddy_block *block; - struct list_head *trim_list; - u64 original_size; - LIST_HEAD(temp); - - trim_list = &vres->blocks; - original_size = (u64)vres->base.size; - - /* - * If size value is rounded up to min_block_size, trim the last - * block to the required size - */ - if (!list_is_singular(&vres->blocks)) { - block = 
list_last_entry(&vres->blocks, typeof(*block), link); - list_move_tail(&block->link, &temp); - trim_list = &temp; - /* - * Compute the original_size value by subtracting the - * last block size with (aligned size - original size) - */ - original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size); - } - - mutex_lock(&mgr->lock); - drm_buddy_block_trim(mm, - original_size, - trim_list); - mutex_unlock(&mgr->lock); - - if (!list_empty(&temp)) - list_splice_tail(trim_list, &vres->blocks); - } - vres->base.start = 0; + size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks), + vres->base.size); list_for_each_entry(block, &vres->blocks, link) { unsigned long start; @@ -573,8 +549,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, amdgpu_vram_mgr_block_size(block); start >>= PAGE_SHIFT; - if (start > PFN_UP(vres->base.size)) - start -= PFN_UP(vres->base.size); + if (start > PFN_UP(size)) + start -= PFN_UP(size); else start = 0; vres->base.start = max(vres->base.start, start); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index 565a1fa43..2b99eed5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -163,16 +163,11 @@ int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode) return 0; } -int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode) +static int __amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, + int mode) { int ret, curr_mode, num_xcps = 0; - if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE) - return -EINVAL; - - if (xcp_mgr->mode == mode) - return 0; - if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode) return 0; @@ -201,6 +196,25 @@ out: return ret; } +int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode) +{ + if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE) + return -EINVAL; + + if (xcp_mgr->mode == mode) + return 0; + + return __amdgpu_xcp_switch_partition_mode(xcp_mgr, mode); +} + +int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) +{ + if (!xcp_mgr || xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) + return 0; + + return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode); +} + int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) { int mode; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index 9a1036aee..90138bc5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -129,6 +129,7 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode, int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode); int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags); int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode); +int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr); int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr, enum AMDGPU_XCP_IP_BLOCK ip, int instance); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 7e91b2478..bd20cb3b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -103,6 +103,53 @@ static const int walf_pcs_err_noncorrectable_mask_reg_aldebaran[] = { smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000 }; +static const int xgmi3x16_pcs_err_status_reg_v6_4[] = { + smnPCS_XGMI3X16_PCS_ERROR_STATUS, + 
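The amdgpu_xcp change a few hunks above splits the public amdgpu_xcp_switch_partition_mode() entry point from an internal worker so the new restore path can re-apply the current mode after a reset without being short-circuited by the "already in this mode" early return. A small sketch of that validate-then-delegate shape, with hypothetical names:

#include <stdio.h>

#define MODE_NONE (-1)

static int cur_mode = 2;        /* mode tracked by software */

/* Internal worker: reprograms the hardware unconditionally (sketch). */
static int do_switch(int mode)
{
        printf("reprogramming to mode %d\n", mode);
        return 0;
}

/* Public entry: validates and skips redundant switches. */
static int switch_mode(int mode)
{
        if (mode == MODE_NONE)
                return -1;
        if (cur_mode == mode)
                return 0;       /* nothing to do */
        return do_switch(mode);
}

/* Restore after reset: the mode is unchanged in software, but the
 * hardware lost it, so the early-out in switch_mode() must be bypassed
 * by calling the worker directly. */
static int restore_mode(void)
{
        if (cur_mode == MODE_NONE)
                return 0;
        return do_switch(cur_mode);
}

int main(void)
{
        switch_mode(2);         /* no-op */
        return restore_mode();  /* reprograms mode 2 */
}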
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000 +}; + +static const int xgmi3x16_pcs_err_noncorrectable_mask_reg_v6_4[] = { + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK, + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000 +}; + +static const u64 xgmi_v6_4_0_mca_base_array[] = { + 0x11a09200, + 0x11b09200, +}; + +static const char *xgmi_v6_4_0_ras_error_code_ext[32] = { + [0x00] = "XGMI PCS DataLossErr", + [0x01] = "XGMI PCS TrainingErr", + [0x02] = "XGMI PCS FlowCtrlAckErr", + [0x03] = "XGMI PCS RxFifoUnderflowErr", + [0x04] = "XGMI PCS RxFifoOverflowErr", + [0x05] = "XGMI PCS CRCErr", + [0x06] = "XGMI PCS BERExceededErr", + [0x07] = "XGMI PCS TxMetaDataErr", + [0x08] = "XGMI PCS ReplayBufParityErr", + [0x09] = "XGMI PCS DataParityErr", + [0x0a] = "XGMI PCS ReplayFifoOverflowErr", + [0x0b] = "XGMI PCS ReplayFifoUnderflowErr", + [0x0c] = "XGMI PCS ElasticFifoOverflowErr", + [0x0d] = "XGMI PCS DeskewErr", + [0x0e] = "XGMI PCS FlowCtrlCRCErr", + [0x0f] = "XGMI PCS DataStartupLimitErr", + [0x10] = "XGMI PCS FCInitTimeoutErr", + [0x11] = "XGMI PCS RecoveryTimeoutErr", + [0x12] = "XGMI PCS ReadySerialTimeoutErr", + [0x13] = "XGMI PCS ReadySerialAttemptErr", + [0x14] = "XGMI PCS RecoveryAttemptErr", + [0x15] = "XGMI PCS RecoveryRelockAttemptErr", + [0x16] = "XGMI PCS ReplayAttemptErr", + [0x17] = "XGMI PCS SyncHdrErr", + [0x18] = "XGMI PCS TxReplayTimeoutErr", + [0x19] = "XGMI PCS RxReplayTimeoutErr", + [0x1a] = "XGMI PCS LinkSubTxTimeoutErr", + [0x1b] = "XGMI PCS LinkSubRxTimeoutErr", + [0x1c] = "XGMI PCS RxCMDPktErr", +}; + static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = { {"XGMI PCS DataLossErr", SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)}, @@ -325,6 +372,17 @@ static ssize_t amdgpu_xgmi_show_device_id(struct device *dev, } +static ssize_t amdgpu_xgmi_show_physical_id(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + return sysfs_emit(buf, "%u\n", adev->gmc.xgmi.physical_node_id); + +} + static ssize_t amdgpu_xgmi_show_num_hops(struct device *dev, struct device_attribute *attr, char *buf) @@ -390,6 +448,7 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev, static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL); +static DEVICE_ATTR(xgmi_physical_id, 0444, amdgpu_xgmi_show_physical_id, NULL); static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL); static DEVICE_ATTR(xgmi_num_hops, S_IRUGO, amdgpu_xgmi_show_num_hops, NULL); static DEVICE_ATTR(xgmi_num_links, S_IRUGO, amdgpu_xgmi_show_num_links, NULL); @@ -407,6 +466,12 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev, return ret; } + ret = device_create_file(adev->dev, &dev_attr_xgmi_physical_id); + if (ret) { + dev_err(adev->dev, "XGMI: Failed to create device file xgmi_physical_id\n"); + return ret; + } + /* Create xgmi error file */ ret = device_create_file(adev->dev, &dev_attr_xgmi_error); if (ret) @@ -448,6 +513,7 @@ remove_link: remove_file: device_remove_file(adev->dev, &dev_attr_xgmi_device_id); + device_remove_file(adev->dev, &dev_attr_xgmi_physical_id); device_remove_file(adev->dev, &dev_attr_xgmi_error); device_remove_file(adev->dev, &dev_attr_xgmi_num_hops); device_remove_file(adev->dev, &dev_attr_xgmi_num_links); @@ -463,6 +529,7 @@ static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev, memset(node, 0, sizeof(node)); device_remove_file(adev->dev, &dev_attr_xgmi_device_id); + 
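The xgmi_v6_4_0_ras_error_code_ext table above uses C99 designated initializers, so any extended error code without an entry decodes to NULL and can be skipped safely after a bounds check. A minimal sketch of that lookup pattern (table contents abbreviated):

#include <stdio.h>

/* Sparse table: indices without an initializer default to NULL. */
static const char *err_str[32] = {
        [0x00] = "DataLossErr",
        [0x05] = "CRCErr",
        [0x1c] = "RxCMDPktErr",
};

static const char *decode(unsigned int code)
{
        /* bounds check first, then tolerate holes in the table */
        if (code >= sizeof(err_str) / sizeof(err_str[0]))
                return NULL;
        return err_str[code];
}

int main(void)
{
        const char *s = decode(0x05);

        printf("%s\n", s ? s : "unknown");
        return 0;
}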
device_remove_file(adev->dev, &dev_attr_xgmi_physical_id); device_remove_file(adev->dev, &dev_attr_xgmi_error); device_remove_file(adev->dev, &dev_attr_xgmi_num_hops); device_remove_file(adev->dev, &dev_attr_xgmi_num_links); @@ -888,7 +955,7 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_comm adev->gmc.xgmi.num_physical_nodes == 0) return 0; - adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev); + amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL); return amdgpu_ras_block_late_init(adev, ras_block); } @@ -906,7 +973,7 @@ static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg WREG32_PCIE(pcs_status_reg, 0); } -static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev) +static void amdgpu_xgmi_legacy_reset_ras_error_count(struct amdgpu_device *adev) { uint32_t i; @@ -932,6 +999,49 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev) default: break; } + + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { + case IP_VERSION(6, 4, 0): + for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++) + pcs_clear_status(adev, + xgmi3x16_pcs_err_status_reg_v6_4[i]); + break; + default: + break; + } +} + +static void __xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst, u64 mca_base) +{ + WREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS, 0ULL); +} + +static void xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(xgmi_v6_4_0_mca_base_array); i++) + __xgmi_v6_4_0_reset_error_count(adev, xgmi_inst, xgmi_v6_4_0_mca_base_array[i]); +} + +static void xgmi_v6_4_0_reset_ras_error_count(struct amdgpu_device *adev) +{ + int i; + + for_each_inst(i, adev->aid_mask) + xgmi_v6_4_0_reset_error_count(adev, i); +} + +static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev) +{ + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { + case IP_VERSION(6, 4, 0): + xgmi_v6_4_0_reset_ras_error_count(adev); + break; + default: + amdgpu_xgmi_legacy_reset_ras_error_count(adev); + break; + } } static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev, @@ -948,7 +1058,10 @@ static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev, uint32_t field_array_size = 0; if (is_xgmi_pcs) { - if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) { + if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == + IP_VERSION(6, 1, 0) || + amdgpu_ip_version(adev, XGMI_HWIP, 0) == + IP_VERSION(6, 4, 0)) { pcs_ras_fields = &xgmi3x16_pcs_ras_fields[0]; field_array_size = ARRAY_SIZE(xgmi3x16_pcs_ras_fields); } else { @@ -982,11 +1095,11 @@ static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev, return 0; } -static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, - void *ras_error_status) +static void amdgpu_xgmi_legacy_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - int i; + int i, supported = 1; uint32_t data, mask_data = 0; uint32_t ue_cnt = 0, ce_cnt = 0; @@ -1050,42 +1163,144 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, } break; default: - dev_warn(adev->dev, "XGMI RAS error query not supported"); + supported = 0; break; } - adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev); + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { + case IP_VERSION(6, 4, 0): + /* check xgmi3x16 pcs error */ + for (i = 0; i < 
ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++) { + data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_v6_4[i]); + mask_data = + RREG32_PCIE(xgmi3x16_pcs_err_noncorrectable_mask_reg_v6_4[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, data, + mask_data, &ue_cnt, &ce_cnt, true, true); + } + break; + default: + if (!supported) + dev_warn(adev->dev, "XGMI RAS error query not supported"); + break; + } + + amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL); err_data->ue_count += ue_cnt; err_data->ce_count += ce_cnt; } +static enum amdgpu_mca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdgpu_device *adev, u64 status) +{ + const char *error_str; + int ext_error_code; + + ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(status); + + error_str = ext_error_code < ARRAY_SIZE(xgmi_v6_4_0_ras_error_code_ext) ? + xgmi_v6_4_0_ras_error_code_ext[ext_error_code] : NULL; + if (error_str) + dev_info(adev->dev, "%s detected\n", error_str); + + switch (ext_error_code) { + case 0: + return AMDGPU_MCA_ERROR_TYPE_UE; + case 6: + return AMDGPU_MCA_ERROR_TYPE_CE; + default: + return -EINVAL; + } + + return -EINVAL; +} + +static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct amdgpu_smuio_mcm_config_info *mcm_info, + u64 mca_base, struct ras_err_data *err_data) +{ + int xgmi_inst = mcm_info->die_id; + u64 status = 0; + + status = RREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS); + if (!MCA_REG__STATUS__VAL(status)) + return; + + switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) { + case AMDGPU_MCA_ERROR_TYPE_UE: + amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, 1ULL); + break; + case AMDGPU_MCA_ERROR_TYPE_CE: + amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, 1ULL); + break; + default: + break; + } + + WREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS, 0ULL); +} + +static void xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, int xgmi_inst, struct ras_err_data *err_data) +{ + struct amdgpu_smuio_mcm_config_info mcm_info = { + .socket_id = adev->smuio.funcs->get_socket_id(adev), + .die_id = xgmi_inst, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(xgmi_v6_4_0_mca_base_array); i++) + __xgmi_v6_4_0_query_error_count(adev, &mcm_info, xgmi_v6_4_0_mca_base_array[i], err_data); +} + +static void xgmi_v6_4_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + int i; + + for_each_inst(i, adev->aid_mask) + xgmi_v6_4_0_query_error_count(adev, i, err_data); +} + +static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { + case IP_VERSION(6, 4, 0): + xgmi_v6_4_0_query_ras_error_count(adev, ras_error_status); + break; + default: + amdgpu_xgmi_legacy_query_ras_error_count(adev, ras_error_status); + break; + } +} + /* Trigger XGMI/WAFL error */ static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, void *inject_if, uint32_t instance_mask) { - int ret = 0; + int ret1, ret2; struct ta_ras_trigger_error_input *block_info = (struct ta_ras_trigger_error_input *)inject_if; if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) dev_warn(adev->dev, "Failed to disallow df cstate"); - if (amdgpu_dpm_allow_xgmi_power_down(adev, false)) + ret1 = amdgpu_dpm_set_xgmi_plpd_mode(adev, XGMI_PLPD_DISALLOW); + if (ret1 && ret1 != -EOPNOTSUPP) dev_warn(adev->dev, "Failed to disallow XGMI power down"); - ret = psp_ras_trigger_error(&adev->psp, block_info, 
instance_mask); + ret2 = psp_ras_trigger_error(&adev->psp, block_info, instance_mask); if (amdgpu_ras_intr_triggered()) - return ret; + return ret2; - if (amdgpu_dpm_allow_xgmi_power_down(adev, true)) + ret1 = amdgpu_dpm_set_xgmi_plpd_mode(adev, XGMI_PLPD_DEFAULT); + if (ret1 && ret1 != -EOPNOTSUPP) dev_warn(adev->dev, "Failed to allow XGMI power down"); if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) dev_warn(adev->dev, "Failed to allow df cstate"); - return ret; + return ret2; } struct amdgpu_ras_block_hw_ops xgmi_ras_hw_ops = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index 86fbf5693..1592c63b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -43,7 +43,7 @@ struct amdgpu_hive_info { } pstate; struct amdgpu_reset_domain *reset_domain; - uint32_t device_remove_count; + atomic_t ras_recovery; }; struct amdgpu_pcs_ras_field { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index 104a5ad83..51a14f6d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -90,10 +90,11 @@ union amd_sriov_msg_feature_flags { uint32_t host_load_ucodes : 1; uint32_t host_flr_vramlost : 1; uint32_t mm_bw_management : 1; - uint32_t pp_one_vf_mode : 1; + uint32_t pp_one_vf_mode : 1; uint32_t reg_indirect_acc : 1; uint32_t av1_support : 1; - uint32_t reserved : 25; + uint32_t vcn_rb_decouple : 1; + uint32_t reserved : 24; } flags; uint32_t all; }; diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index d0fc62784..3f715e7fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -500,7 +500,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, return -EINVAL; } - if (adev->kfd.init_complete) + if (adev->kfd.init_complete && !amdgpu_in_reset(adev)) flags |= AMDGPU_XCP_OPS_KFD; if (flags & AMDGPU_XCP_OPS_KFD) { diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c index a13c443ea..42f4e163e 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c @@ -68,7 +68,7 @@ int athub_v1_0_set_clockgating(struct amdgpu_device *adev, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->ip_versions[ATHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) { case IP_VERSION(9, 0, 0): case IP_VERSION(9, 1, 0): case IP_VERSION(9, 2, 0): diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c index a9521c98e..5a122f50a 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c @@ -77,7 +77,7 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->ip_versions[ATHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) { case IP_VERSION(1, 3, 1): case IP_VERSION(2, 0, 0): case IP_VERSION(2, 0, 2): diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c index 78508ae6a..e143fcc46 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c @@ -70,7 +70,7 @@ int athub_v2_1_set_clockgating(struct amdgpu_device *adev, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->ip_versions[ATHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) { case IP_VERSION(2, 1, 0): 
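The reworked error-injection helper above keeps two status variables: ret2 carries the result of the actual psp_ras_trigger_error() call, while ret1 only gates warnings for the best-effort power-state bracketing, so a failed restore can no longer overwrite the injection result. Sketched in isolation with hypothetical stubs:

#include <errno.h>
#include <stdio.h>

/* Hypothetical best-effort bracketing calls. */
static int disallow_power_down(void) { return -EOPNOTSUPP; }
static int allow_power_down(void)    { return 0; }
static int trigger_error(void)       { return 0; }

static int inject_with_bracket(void)
{
        int ret1, ret2;

        ret1 = disallow_power_down();
        if (ret1 && ret1 != -EOPNOTSUPP)        /* unsupported is not fatal */
                fprintf(stderr, "failed to disallow power down\n");

        ret2 = trigger_error();                 /* the result that matters */

        ret1 = allow_power_down();
        if (ret1 && ret1 != -EOPNOTSUPP)
                fprintf(stderr, "failed to re-allow power down\n");

        return ret2;    /* teardown warnings never mask this */
}

int main(void)
{
        return inject_with_bracket();
}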
case IP_VERSION(2, 1, 1): case IP_VERSION(2, 1, 2): diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c index f0e235f98..f0737fb3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c @@ -36,7 +36,7 @@ static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev) { uint32_t data; - switch (adev->ip_versions[ATHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) { case IP_VERSION(3, 0, 1): data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1); break; @@ -49,7 +49,7 @@ static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev) static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data) { - switch (adev->ip_versions[ATHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) { case IP_VERSION(3, 0, 1): WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data); break; @@ -99,10 +99,11 @@ int athub_v3_0_set_clockgating(struct amdgpu_device *adev, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->ip_versions[ATHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) { case IP_VERSION(3, 0, 0): case IP_VERSION(3, 0, 1): case IP_VERSION(3, 0, 2): + case IP_VERSION(3, 3, 0): athub_v3_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); athub_v3_0_update_medium_grain_light_sleep(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index 9f63ddb89..2c2210007 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -1444,10 +1444,27 @@ static void atom_get_vbios_pn(struct atom_context *ctx) static void atom_get_vbios_version(struct atom_context *ctx) { + unsigned short start = 3, end; unsigned char *vbios_ver; + unsigned char *p_rom; + + p_rom = ctx->bios; + /* Search from strings offset if it's present */ + start = *(unsigned short *)(p_rom + + OFFSET_TO_GET_ATOMBIOS_STRING_START); + + /* Search till atom rom header start point */ + end = *(unsigned short *)(p_rom + OFFSET_TO_ATOM_ROM_HEADER_POINTER); + + /* Use hardcoded offsets, if the offsets are not populated */ + if (end <= start) { + start = 3; + end = 1024; + } /* find anchor ATOMBIOSBK-AMD */ - vbios_ver = atom_find_str_in_rom(ctx, BIOS_VERSION_PREFIX, 3, 1024, 64); + vbios_ver = + atom_find_str_in_rom(ctx, BIOS_VERSION_PREFIX, start, end, 64); if (vbios_ver != NULL) { /* skip ATOMBIOSBK-AMD VER */ vbios_ver += 18; diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index d95b2dc78..3ee219aa2 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -228,7 +228,6 @@ error: register_acpi_backlight: /* Try registering an ACPI video backlight device instead. 
*/ acpi_video_register_backlight(); - return; } void diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index e63abdf52..4dfaa017c 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1709,10 +1709,6 @@ static void cik_program_aspm(struct amdgpu_device *adev) if (pci_is_root_bus(adev->pdev->bus)) return; - /* XXX double check APUs */ - if (adev->flags & AMD_IS_APU) - return; - orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL); data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK; data |= (0x24 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT) | diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index 6f7c031dd..f24e34dc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -204,6 +204,12 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev, tmp = RREG32(mmIH_RB_CNTL); tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; WREG32(mmIH_RB_CNTL, tmp); + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; + WREG32(mmIH_RB_CNTL, tmp); } return (wptr & ih->ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 52598fbc9..a3fccc4c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -308,8 +308,6 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev) u32 rb_cntl; int i; - amdgpu_sdma_unset_buffer_funcs_helper(adev); - for (i = 0; i < adev->sdma.num_instances; i++) { rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK; @@ -498,9 +496,6 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) r = amdgpu_ring_test_helper(ring); if (r) return r; - - if (adev->mman.buffer_funcs_ring == ring) - amdgpu_ttm_set_buffer_funcs_status(adev, true); } return 0; @@ -925,9 +920,14 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev, static int cik_sdma_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; adev->sdma.num_instances = SDMA_MAX_INSTANCE; + r = cik_sdma_init_microcode(adev); + if (r) + return r; + cik_sdma_set_ring_funcs(adev); cik_sdma_set_irq_funcs(adev); cik_sdma_set_buffer_funcs(adev); @@ -942,12 +942,6 @@ static int cik_sdma_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r, i; - r = cik_sdma_init_microcode(adev); - if (r) { - DRM_ERROR("Failed to load sdma firmware!\n"); - return r; - } - /* SDMA trap event */ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224, &adev->sdma.trap_irq); diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index b8c47e0cf..c19681492 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -216,6 +216,11 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32(mmIH_RB_CNTL, tmp); + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. 
+ */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32(mmIH_RB_CNTL, tmp); out: return (wptr & ih->ptr_mask); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 584cd5277..bb666cb75 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -1036,7 +1036,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, (u32)mode->clock); line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, (u32)mode->clock); - line_time = min(line_time, (u32)65535); + line_time = min_t(u32, line_time, 65535); /* watermark for high clocks */ if (adev->pm.dpm_enabled) { @@ -1066,7 +1066,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, wm_high.num_heads = num_heads; /* set for high clocks */ - latency_watermark_a = min(dce_v10_0_latency_watermark(&wm_high), (u32)65535); + latency_watermark_a = min_t(u32, dce_v10_0_latency_watermark(&wm_high), 65535); /* possibly force display priority to high */ /* should really do this at mode validation time... */ @@ -1105,7 +1105,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, wm_low.num_heads = num_heads; /* set for low clocks */ - latency_watermark_b = min(dce_v10_0_latency_watermark(&wm_low), (u32)65535); + latency_watermark_b = min_t(u32, dce_v10_0_latency_watermark(&wm_low), 65535); /* possibly force display priority to high */ /* should really do this at mode validation time... */ diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index c14b70350..7af277f61 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -1068,7 +1068,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, (u32)mode->clock); line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, (u32)mode->clock); - line_time = min(line_time, (u32)65535); + line_time = min_t(u32, line_time, 65535); /* watermark for high clocks */ if (adev->pm.dpm_enabled) { @@ -1098,7 +1098,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, wm_high.num_heads = num_heads; /* set for high clocks */ - latency_watermark_a = min(dce_v11_0_latency_watermark(&wm_high), (u32)65535); + latency_watermark_a = min_t(u32, dce_v11_0_latency_watermark(&wm_high), 65535); /* possibly force display priority to high */ /* should really do this at mode validation time... */ @@ -1137,7 +1137,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, wm_low.num_heads = num_heads; /* set for low clocks */ - latency_watermark_b = min(dce_v11_0_latency_watermark(&wm_low), (u32)65535); + latency_watermark_b = min_t(u32, dce_v11_0_latency_watermark(&wm_low), 65535); /* possibly force display priority to high */ /* should really do this at mode validation time... 
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 7f85ba5b7..143efc37a 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -845,7 +845,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, (u32)mode->clock); line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, (u32)mode->clock); - line_time = min(line_time, (u32)65535); + line_time = min_t(u32, line_time, 65535); priority_a_cnt = 0; priority_b_cnt = 0; @@ -906,9 +906,9 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, wm_low.num_heads = num_heads; /* set for high clocks */ - latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535); + latency_watermark_a = min_t(u32, dce_v6_0_latency_watermark(&wm_high), 65535); /* set for low clocks */ - latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535); + latency_watermark_b = min_t(u32, dce_v6_0_latency_watermark(&wm_low), 65535); /* possibly force display priority to high */ /* should really do this at mode validation time... */ diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index f2b3cb5ed..adeddfb7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -975,7 +975,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, (u32)mode->clock); line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, (u32)mode->clock); - line_time = min(line_time, (u32)65535); + line_time = min_t(u32, line_time, 65535); /* watermark for high clocks */ if (adev->pm.dpm_enabled) { @@ -1005,7 +1005,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, wm_high.num_heads = num_heads; /* set for high clocks */ - latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535); + latency_watermark_a = min_t(u32, dce_v8_0_latency_watermark(&wm_high), 65535); /* possibly force display priority to high */ /* should really do this at mode validation time... */ @@ -1044,7 +1044,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, wm_low.num_heads = num_heads; /* set for low clocks */ - latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535); + latency_watermark_b = min_t(u32, dce_v8_0_latency_watermark(&wm_low), 65535); /* possibly force display priority to high */ /* should really do this at mode validation time... */ diff --git a/drivers/gpu/drm/amd/amdgpu/df_v4_6_2.c b/drivers/gpu/drm/amd/amdgpu/df_v4_6_2.c new file mode 100644 index 000000000..a47960a0b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/df_v4_6_2.c @@ -0,0 +1,34 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "df_v4_6_2.h" + +static bool df_v4_6_2_query_ras_poison_mode(struct amdgpu_device *adev) +{ + /* return true since related regs are inaccessible */ + return true; +} + +const struct amdgpu_df_funcs df_v4_6_2_funcs = { + .query_ras_poison_mode = df_v4_6_2_query_ras_poison_mode, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/df_v4_6_2.h b/drivers/gpu/drm/amd/amdgpu/df_v4_6_2.h new file mode 100644 index 000000000..3bc3e6d21 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/df_v4_6_2.h @@ -0,0 +1,31 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __DF_V4_6_2_H__ +#define __DF_V4_6_2_H__ + +#include "soc15_common.h" + +extern const struct amdgpu_df_funcs df_v4_6_2_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index c2b9dfc64..dcdecb18b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -102,6 +102,11 @@ #define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0 +#define mmGOLDEN_TSC_COUNT_UPPER_Cyan_Skillfish 0x0105 +#define mmGOLDEN_TSC_COUNT_UPPER_Cyan_Skillfish_BASE_IDX 1 +#define mmGOLDEN_TSC_COUNT_LOWER_Cyan_Skillfish 0x0106 +#define mmGOLDEN_TSC_COUNT_LOWER_Cyan_Skillfish_BASE_IDX 1 + #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh 0x0025 #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX 1 #define mmGOLDEN_TSC_COUNT_LOWER_Vangogh 0x0026 @@ -3493,6 +3498,8 @@ static void gfx_v10_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev, unsigned int vmid); +static int gfx_v10_0_set_powergating_state(void *handle, + enum amd_powergating_state state); static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) { amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); @@ -3627,7 +3634,7 @@ static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 1, 10): soc15_program_register_sequence(adev, golden_settings_gc_rlc_spm_10_0_nv10, @@ -3650,7 +3657,7 @@ static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev) static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 1, 10): soc15_program_register_sequence(adev, golden_settings_gc_10_1, @@ -3891,7 +3898,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev) { adev->gfx.cp_fw_write_wait = false; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 1, 10): case IP_VERSION(10, 1, 2): case IP_VERSION(10, 1, 1): @@ -3942,7 +3949,7 @@ static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev) static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 1, 10): if (!gfx_v10_0_navi10_gfxoff_should_enable(adev)) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; @@ -3964,8 +3971,8 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1) && - (!(adev->pdev->device == 0x7340 && adev->pdev->revision != 0x00))) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 1) && + (!(adev->pdev->device == 0x7340 && adev->pdev->revision != 0x00))) wks = "_wks"; amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); @@ -4020,8 +4027,6 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) err = 0; adev->gfx.mec2_fw = NULL; } - amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2); - amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT); gfx_v10_0_check_fw_write_wait(adev); out: @@ -4141,7 +4146,7 @@ static void 
gfx_v10_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG3); reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL); reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX); - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 3, 0): reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT_0_Sienna_Cichlid); @@ -4355,7 +4360,7 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev) { u32 gb_addr_config; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 1, 10): case IP_VERSION(10, 1, 1): case IP_VERSION(10, 1, 2): @@ -4488,7 +4493,7 @@ static int gfx_v10_0_sw_init(void *handle) struct amdgpu_kiq *kiq; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 1, 10): case IP_VERSION(10, 1, 1): case IP_VERSION(10, 1, 2): @@ -4746,9 +4751,12 @@ static void gfx_v10_0_setup_rb(struct amdgpu_device *adev) for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { bitmap = i * adev->gfx.config.max_sh_per_se + j; - if (((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) || - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) || - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 6))) && + if (((amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(10, 3, 0)) || + (amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(10, 3, 3)) || + (amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(10, 3, 6))) && ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1)) continue; gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff, 0); @@ -4776,7 +4784,7 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade /* for ASICs that integrates GFX v10.3 * pa_sc_tile_steering_override should be set to 0 */ - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) return 0; /* init num_sc */ @@ -4957,7 +4965,7 @@ static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev) /* TCCs are global (not instanced). */ uint32_t tcc_disable; - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) { + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) { tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_gc_10_3) | RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_gc_10_3); } else { @@ -5034,7 +5042,7 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev) adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* csib */ - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) { + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 2)) { WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI, adev->gfx.rlc.clear_state_gpu_addr >> 32); WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO, @@ -5663,7 +5671,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 
0 : 1); - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 2)) WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp); else WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); @@ -6054,7 +6062,7 @@ static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev, } WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp); } - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 3, 0): case IP_VERSION(10, 3, 2): case IP_VERSION(10, 3, 1): @@ -6187,7 +6195,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) { if (enable) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 3, 0): case IP_VERSION(10, 3, 2): case IP_VERSION(10, 3, 1): @@ -6203,7 +6211,7 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) break; } } else { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 3, 0): case IP_VERSION(10, 3, 2): case IP_VERSION(10, 3, 1): @@ -6303,7 +6311,7 @@ static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; /* tell RLC which is KIQ queue */ - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 3, 0): case IP_VERSION(10, 3, 2): case IP_VERSION(10, 3, 1): @@ -6456,6 +6464,13 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) if (adev->gfx.me.mqd_backup[mqd_idx]) memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); } else { + mutex_lock(&adev->srbm_mutex); + nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + if (ring->doorbell_index == adev->doorbell_index.gfx_ring0 << 1) + gfx_v10_0_cp_gfx_set_doorbell(adev, ring); + + nv_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); /* restore mqd with the backup copy */ if (adev->gfx.me.mqd_backup[mqd_idx]) memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); @@ -6573,7 +6588,8 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m, tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1); #endif tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1); - tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, + prop->allow_tunneling); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); mqd->cp_hqd_pq_control = tmp; @@ -6914,7 +6930,7 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev) * check if mmVGT_ESGS_RING_SIZE_UMD * has been remapped to mmVGT_ESGS_RING_SIZE */ - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 3, 0): case IP_VERSION(10, 3, 2): case IP_VERSION(10, 3, 4): @@ -6963,7 +6979,7 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev) */ WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0); - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 3, 0): case IP_VERSION(10, 3, 2): case IP_VERSION(10, 3, 1): @@ -7136,19 +7152,19 @@ static int gfx_v10_0_hw_init(void *handle) * init golden registers and rlc resume may override some registers, * reconfig them here */ - if (adev->ip_versions[GC_HWIP][0] == 
IP_VERSION(10, 1, 10) || - adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1) || - adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 10) || + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 1) || + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 2)) gfx_v10_0_tcp_harvest(adev); r = gfx_v10_0_cp_resume(adev); if (r) return r; - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0)) gfx_v10_3_program_pbb_mode(adev); - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) gfx_v10_3_set_power_brake_sequence(adev); return r; @@ -7161,6 +7177,13 @@ static int gfx_v10_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); + /* WA added for Vangogh asic fixing the SMU suspend failure + * It needs to set power gating again during gfxoff control + * otherwise the gfxoff disallowing will be failed to set. + */ + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 1)) + gfx_v10_0_set_powergating_state(handle, AMD_PG_STATE_UNGATE); + if (!adev->no_hw_access) { if (amdgpu_async_gfx_ring) { if (amdgpu_gfx_disable_kgq(adev, 0)) @@ -7252,7 +7275,7 @@ static int gfx_v10_0_soft_reset(void *handle) /* GRBM_STATUS2 */ tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2); - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 3, 0): case IP_VERSION(10, 3, 2): case IP_VERSION(10, 3, 1): @@ -7309,7 +7332,23 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock, clock_lo, clock_hi, hi_check; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 1, 4): + preempt_disable(); + clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Cyan_Skillfish); + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Cyan_Skillfish); + hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Cyan_Skillfish); + /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over + * roughly every 42 seconds. 
+ */ + if (hi_check != clock_hi) { + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Cyan_Skillfish); + clock_hi = hi_check; + } + preempt_enable(); + clock = clock_lo | (clock_hi << 32ULL); + break; case IP_VERSION(10, 3, 1): case IP_VERSION(10, 3, 3): case IP_VERSION(10, 3, 7): @@ -7396,7 +7435,7 @@ static int gfx_v10_0_early_init(void *handle) adev->gfx.funcs = &gfx_v10_0_gfx_funcs; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 1, 10): case IP_VERSION(10, 1, 1): case IP_VERSION(10, 1, 2): @@ -7467,7 +7506,7 @@ static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id) data = RLC_SAFE_MODE__CMD_MASK; data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 3, 0): case IP_VERSION(10, 3, 2): case IP_VERSION(10, 3, 1): @@ -7505,7 +7544,7 @@ static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id) uint32_t data; data = RLC_SAFE_MODE__CMD_MASK; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 3, 0): case IP_VERSION(10, 3, 2): case IP_VERSION(10, 3, 1): @@ -7816,7 +7855,7 @@ static void gfx_v10_0_apply_medium_grain_clock_gating_workaround(struct amdgpu_d mmCGTS_SA1_QUAD1_SM_CTRL_REG }; - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) { + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 2)) { for (i = 0; i < ARRAY_SIZE(tcp_ctrl_regs_nv12); i++) { reg_idx = adev->reg_offset[GC_HWIP][0][mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG_BASE_IDX] + tcp_ctrl_regs_nv12[i]; @@ -7861,9 +7900,12 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, /* === CGCG + CGLS === */ gfx_v10_0_update_coarse_grain_clock_gating(adev, enable); - if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 10)) || - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1)) || - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2))) + if ((amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(10, 1, 10)) || + (amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(10, 1, 1)) || + (amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(10, 1, 2))) gfx_v10_0_apply_medium_grain_clock_gating_workaround(adev); } else { /* CGCG/CGLS should be disabled before MGCG/MGLS @@ -7894,22 +7936,15 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev, unsigned int vmid) { - u32 reg, data; + u32 data; /* not for *_SOC15 */ - reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL); - if (amdgpu_sriov_is_pp_one_vf(adev)) - data = RREG32_NO_KIQ(reg); - else - data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL); + data = RREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL); data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; - if (amdgpu_sriov_is_pp_one_vf(adev)) - WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data); - else - WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); + WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data); } static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid) @@ -7970,7 +8005,7 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable) * Power/performance team will optimize it and might give a new value later. 
*/ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 3, 1): case IP_VERSION(10, 3, 3): case IP_VERSION(10, 3, 6): @@ -8031,7 +8066,7 @@ static int gfx_v10_0_set_powergating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 1, 10): case IP_VERSION(10, 1, 1): case IP_VERSION(10, 1, 2): @@ -8068,7 +8103,7 @@ static int gfx_v10_0_set_clockgating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 1, 10): case IP_VERSION(10, 1, 1): case IP_VERSION(10, 1, 2): @@ -9315,7 +9350,7 @@ static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev) static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 1, 10): case IP_VERSION(10, 1, 1): case IP_VERSION(10, 1, 3): @@ -9432,10 +9467,14 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev, for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { bitmap = i * adev->gfx.config.max_sh_per_se + j; - if (((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) || - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) || - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 6)) || - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 7))) && + if (((amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(10, 3, 0)) || + (amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(10, 3, 3)) || + (amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(10, 3, 6)) || + (amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(10, 3, 7))) && ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1)) continue; mask = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index d0c3ec9f4..806a8cf90 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -60,6 +60,8 @@ #define regCGTT_WD_CLK_CTRL_BASE_IDX 1 #define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1 0x4e7e #define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX 1 +#define regPC_CONFIG_CNTL_1 0x194d +#define regPC_CONFIG_CNTL_1_BASE_IDX 1 MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin"); @@ -82,6 +84,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_4_pfp.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_5_0_pfp.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_5_0_me.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_5_0_mec.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_5_0_rlc.bin"); static const struct soc15_reg_golden golden_settings_gc_11_0[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000) @@ -100,6 +106,23 @@ static const struct soc15_reg_golden golden_settings_gc_11_0_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a) }; +static const struct soc15_reg_golden golden_settings_gc_11_5_0[] = { + SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_DEBUG5, 0xffffffff, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242), + SOC15_REG_GOLDEN_VALUE(GC, 0, regGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500), + SOC15_REG_GOLDEN_VALUE(GC, 0, 
regGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3), + SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3), + SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL, 0xffffffff, 0xf37fff3f), + SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xfffffffb, 0x00f40188), + SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL4, 0xf0ffffff, 0x80009007), + SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf1ffffff, 0x00880007), + SOC15_REG_GOLDEN_VALUE(GC, 0, regPC_CONFIG_CNTL_1, 0xffffffff, 0x00010000), + SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000), + SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL2, 0x007f0000, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xffcfffff, 0x0000200a), + SOC15_REG_GOLDEN_VALUE(GC, 0, regUTCL1_CTRL_2, 0xffffffff, 0x0000048f) +}; + #define DEFAULT_SH_MEM_CONFIG \ ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ @@ -136,6 +159,7 @@ static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue { amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | + PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */ PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ @@ -269,13 +293,18 @@ static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): soc15_program_register_sequence(adev, golden_settings_gc_11_0_1, (const u32)ARRAY_SIZE(golden_settings_gc_11_0_1)); break; + case IP_VERSION(11, 5, 0): + soc15_program_register_sequence(adev, + golden_settings_gc_11_5_0, + (const u32)ARRAY_SIZE(golden_settings_gc_11_5_0)); + break; default: break; } @@ -473,7 +502,7 @@ out: static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): @@ -569,6 +598,14 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) adev->gfx.mec2_fw = NULL; gfx_v11_0_check_fw_cp_gfx_shadow(adev); + + if (adev->gfx.imu.funcs && adev->gfx.imu.funcs->init_microcode) { + err = adev->gfx.imu.funcs->init_microcode(adev); + if (err) + DRM_ERROR("Failed to init imu firmware!\n"); + return err; + } + out: if (err) { amdgpu_ucode_release(&adev->gfx.pfp_fw); @@ -864,8 +901,7 @@ static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = { static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev) { - - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 2): adev->gfx.config.max_hw_contexts = 8; @@ -884,6 +920,7 @@ static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev) break; case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 5, 0): adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -1309,9 +1346,7 @@ static int gfx_v11_0_sw_init(void *handle) struct amdgpu_kiq *kiq; struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->gfxhub.funcs->init(adev); - - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): @@ -1324,6 +1359,7 @@ static int gfx_v11_0_sw_init(void *handle) break; case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 5, 0): adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 1; adev->gfx.me.num_queue_per_pipe = 1; @@ -1342,8 +1378,8 @@ static int gfx_v11_0_sw_init(void *handle) } /* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */ - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3) && - amdgpu_sriov_is_pp_one_vf(adev)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3) && + amdgpu_sriov_is_pp_one_vf(adev)) adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG; /* EOP Event */ @@ -1376,14 +1412,6 @@ static int gfx_v11_0_sw_init(void *handle) adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; - if (adev->gfx.imu.funcs) { - if (adev->gfx.imu.funcs->init_microcode) { - r = adev->gfx.imu.funcs->init_microcode(adev); - if (r) - DRM_ERROR("Failed to load imu firmware!\n"); - } - } - gfx_v11_0_me_init(adev); r = gfx_v11_0_rlc_init(adev); @@ -2570,8 +2598,11 @@ static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev) for (i = 0; i < adev->usec_timeout; i++) { cp_status = RREG32_SOC15(GC, 0, regCP_STAT); - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 1) || - adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 4)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(11, 0, 1) || + amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(11, 0, 4) || + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0)) bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1); else @@ -3808,7 +3839,8 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m, tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1)); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1); - tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, + prop->allow_tunneling); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); mqd->cp_hqd_pq_control = tmp; @@ -4351,6 +4383,10 @@ static int gfx_v11_0_hw_init(void *handle) if (r) return r; + /* get IMU version from HW if it's not set */ + if (!adev->gfx.imu_fw_version) + adev->gfx.imu_fw_version = RREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_0); + return r; } @@ -4961,23 +4997,16 @@ static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev, static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) { - u32 reg, data; + u32 data; amdgpu_gfx_off_ctrl(adev, false); - reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL); - if (amdgpu_sriov_is_pp_one_vf(adev)) - data = RREG32_NO_KIQ(reg); - else - data = RREG32(reg); + data = RREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL); data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; - if (amdgpu_sriov_is_pp_one_vf(adev)) - WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data); - else - WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data); + WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data); amdgpu_gfx_off_ctrl(adev, true); } @@ -5009,9 +5038,10 @@ 
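The gfx_v11_0_update_spm_vmid() hunk above collapses the SR-IOV one-VF and bare-metal branches into a single NO_KIQ read-modify-write of RLC_SPM_MC_CNTL. As a minimal standalone sketch of that field update (SPM_VMID_MASK and SPM_VMID_SHIFT are illustrative stand-ins for the generated RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK/__SHIFT header values, and the 4-bit field width is an assumption):

#include <stdint.h>

#define SPM_VMID_MASK  0xfU /* assumed 4-bit VMID field */
#define SPM_VMID_SHIFT 0U   /* assumed field position */

static uint32_t spm_set_vmid(uint32_t reg, uint32_t vmid)
{
	reg &= ~(SPM_VMID_MASK << SPM_VMID_SHIFT);       /* clear the old VMID */
	reg |= (vmid & SPM_VMID_MASK) << SPM_VMID_SHIFT; /* insert the new one */
	return reg;
}

The same simplification appears in the gfx_v10_0_update_spm_vmid_internal() hunk earlier in this patch; in both cases the pp_one_vf special case disappears because the NO_KIQ accessors are now used unconditionally.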
static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable) // Program RLC_PG_DELAY3 for CGPG hysteresis if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 5, 0): WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1); break; default: @@ -5038,7 +5068,7 @@ static int gfx_v11_0_set_powergating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): @@ -5046,6 +5076,7 @@ static int gfx_v11_0_set_powergating_state(void *handle, break; case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 5, 0): if (!enable) amdgpu_gfx_off_ctrl(adev, false); @@ -5070,12 +5101,13 @@ static int gfx_v11_0_set_clockgating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 5, 0): gfx_v11_0_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE); break; @@ -5152,45 +5184,17 @@ static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - uint32_t *wptr_saved; - uint32_t *is_queue_unmap; - uint64_t aggregated_db_index; - uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size; - uint64_t wptr_tmp; - if (ring->is_mes_queue) { - wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size); - is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size + - sizeof(uint32_t)); - aggregated_db_index = - amdgpu_mes_get_aggregated_doorbell_index(adev, - ring->hw_prio); - - wptr_tmp = ring->wptr & ring->buf_mask; - atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp); - *wptr_saved = wptr_tmp; - /* assume doorbell always being used by mes mapped queue */ - if (*is_queue_unmap) { - WDOORBELL64(aggregated_db_index, wptr_tmp); - WDOORBELL64(ring->doorbell_index, wptr_tmp); - } else { - WDOORBELL64(ring->doorbell_index, wptr_tmp); - - if (*is_queue_unmap) - WDOORBELL64(aggregated_db_index, wptr_tmp); - } + if (ring->use_doorbell) { + /* XXX check if swapping is necessary on BE */ + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, + ring->wptr); + WDOORBELL64(ring->doorbell_index, ring->wptr); } else { - if (ring->use_doorbell) { - /* XXX check if swapping is necessary on BE */ - atomic64_set((atomic64_t *)ring->wptr_cpu_addr, - ring->wptr); - WDOORBELL64(ring->doorbell_index, ring->wptr); - } else { - WREG32_SOC15(GC, 0, regCP_RB0_WPTR, - lower_32_bits(ring->wptr)); - WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, - upper_32_bits(ring->wptr)); - } + WREG32_SOC15(GC, 0, regCP_RB0_WPTR, + lower_32_bits(ring->wptr)); + WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, + upper_32_bits(ring->wptr)); } } @@ -5215,42 +5219,14 @@ static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring) static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - uint32_t *wptr_saved; - uint32_t *is_queue_unmap; - uint64_t aggregated_db_index; - uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size; - uint64_t wptr_tmp; - if (ring->is_mes_queue) 
{ - wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size); - is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size + - sizeof(uint32_t)); - aggregated_db_index = - amdgpu_mes_get_aggregated_doorbell_index(adev, - ring->hw_prio); - - wptr_tmp = ring->wptr & ring->buf_mask; - atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp); - *wptr_saved = wptr_tmp; - /* assume doorbell always used by mes mapped queue */ - if (*is_queue_unmap) { - WDOORBELL64(aggregated_db_index, wptr_tmp); - WDOORBELL64(ring->doorbell_index, wptr_tmp); - } else { - WDOORBELL64(ring->doorbell_index, wptr_tmp); - - if (*is_queue_unmap) - WDOORBELL64(aggregated_db_index, wptr_tmp); - } + /* XXX check if swapping is necessary on BE */ + if (ring->use_doorbell) { + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, + ring->wptr); + WDOORBELL64(ring->doorbell_index, ring->wptr); } else { - /* XXX check if swapping is necessary on BE */ - if (ring->use_doorbell) { - atomic64_set((atomic64_t *)ring->wptr_cpu_addr, - ring->wptr); - WDOORBELL64(ring->doorbell_index, ring->wptr); - } else { - BUG(); /* only DOORBELL method supported on gfx11 now */ - } + BUG(); /* only DOORBELL method supported on gfx11 now */ } } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 2e23d08b4..3bc694336 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -895,7 +895,7 @@ static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 0, 1): soc15_program_register_sequence(adev, golden_settings_gc_9_0, @@ -951,8 +951,8 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) break; } - if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) && - (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2))) + if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) && + (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2))) soc15_program_register_sequence(adev, golden_settings_gc_9_x_common, (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common)); } @@ -1095,14 +1095,14 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) adev->gfx.me_fw_write_wait = false; adev->gfx.mec_fw_write_wait = false; - if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) && + if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) && ((adev->gfx.mec_fw_version < 0x000001a5) || - (adev->gfx.mec_feature_version < 46) || - (adev->gfx.pfp_fw_version < 0x000000b7) || - (adev->gfx.pfp_feature_version < 46))) + (adev->gfx.mec_feature_version < 46) || + (adev->gfx.pfp_fw_version < 0x000000b7) || + (adev->gfx.pfp_feature_version < 46))) DRM_WARN_ONCE("CP firmware version too old, please update!"); - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 0, 1): if ((adev->gfx.me_fw_version >= 0x0000009c) && (adev->gfx.me_feature_version >= 42) && @@ -1202,7 +1202,7 @@ static bool is_raven_kicker(struct amdgpu_device *adev) static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev) { - if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0)) && + if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 3, 0)) && (adev->gfx.me_fw_version >= 0x000000a5) && (adev->gfx.me_feature_version >= 52)) return true; @@ -1215,7 +1215,7 @@ static void gfx_v9_0_check_if_need_gfxoff(struct 
amdgpu_device *adev) if (gfx_v9_0_should_disable_gfxoff(adev->pdev)) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 0, 1): case IP_VERSION(9, 2, 1): case IP_VERSION(9, 4, 0): @@ -1326,9 +1326,9 @@ out: static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev) { - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) || + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) || + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 3, 0)) return false; return true; @@ -1485,7 +1485,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev) if (adev->flags & AMD_IS_APU) always_on_cu_num = 4; - else if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1)) + else if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 2, 1)) always_on_cu_num = 8; else always_on_cu_num = 12; @@ -1836,7 +1836,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) u32 gb_addr_config; int err; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 0, 1): adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; @@ -2002,7 +2002,7 @@ static int gfx_v9_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; unsigned int hw_prio; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 0, 1): case IP_VERSION(9, 2, 1): case IP_VERSION(9, 4, 0): @@ -2363,7 +2363,7 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev) { uint32_t tmp; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 4, 1): tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG); tmp = REG_SET_FIELD(tmp, SQ_CONFIG, DISABLE_BARRIER_WAITCNT, @@ -2700,7 +2700,7 @@ static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev) /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */ data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT); WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data); - if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 3, 0)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 3, 0)) pwr_10_0_gfxip_control_over_cgpg(adev, true); } } @@ -2812,7 +2812,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev) * And it's needed by gfxoff feature. */ if (adev->gfx.rlc.is_rlc_v2_1) { - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1) || + if (amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(9, 2, 1) || (adev->apu_flags & AMD_APU_IS_RAVEN2)) gfx_v9_1_init_rlc_save_restore_list(adev); gfx_v9_0_enable_save_restore_machine(adev); @@ -2925,7 +2926,7 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) return r; } - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 2, 2): case IP_VERSION(9, 1, 0): gfx_v9_0_init_lbpw(adev); @@ -3033,6 +3034,14 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev) gfx_v9_0_cp_gfx_enable(adev, true); + /* For now, limit this quirk to the gfx9 APU series; it is already + * confirmed that the gfx10/gfx11 APUs don't need such an update.
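+ * Guard reading: AMD_IS_APU restricts the skip to APUs, adev->in_s3
+ * restricts it to the S3 path, and !adev->suspend_complete selects the
+ * aborted-suspend case, so a normal resume still resubmits the CSB
+ * (clear-state buffer) packets.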
+ */ + if (adev->flags & AMD_IS_APU && + adev->in_s3 && !adev->suspend_complete) { + DRM_INFO(" Will skip the CSB packet resubmit\n"); + return 0; + } r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); @@ -3713,8 +3722,8 @@ static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev) { u32 tmp; - if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1) && - adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1) && + amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) return; tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG); @@ -3754,7 +3763,7 @@ static int gfx_v9_0_hw_init(void *handle) if (r) return r; - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2)) gfx_v9_4_2_set_power_brake_sequence(adev); return r; @@ -3802,7 +3811,7 @@ static int gfx_v9_0_hw_fini(void *handle) /* Skip stopping RLC with A+A reset or when RLC controls GFX clock */ if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) || - (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) { + (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2))) { dev_dbg(adev->dev, "Skipping RLC halt\n"); return 0; } @@ -3986,7 +3995,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock, clock_lo, clock_hi, hi_check; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 3, 0): preempt_disable(); clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir); @@ -4005,7 +4014,9 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) default: amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) { + if (amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(9, 0, 1) && + amdgpu_sriov_runtime(adev)) { clock = gfx_v9_0_kiq_read_clock(adev); } else { WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); @@ -4357,7 +4368,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) if (!ring->sched.ready) return 0; - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) { + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) { vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus; vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus); vgpr_init_regs_ptr = vgpr_init_regs_arcturus; @@ -4509,8 +4520,8 @@ static int gfx_v9_0_early_init(void *handle) adev->gfx.funcs = &gfx_v9_0_gfx_funcs; - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) || + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2)) adev->gfx.num_gfx_rings = 0; else adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS; @@ -4548,7 +4559,7 @@ static int gfx_v9_0_ecc_late_init(void *handle) } /* requires IBs so do in late init after IB pool is initialized */ - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2)) r = gfx_v9_4_2_do_edc_gpr_workarounds(adev); else r = gfx_v9_0_do_edc_gpr_workarounds(adev); @@ -4580,7 +4591,7 @@ static int gfx_v9_0_late_init(void *handle) if (r) return r; - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) + if (amdgpu_ip_version(adev, 
GC_HWIP, 0) == IP_VERSION(9, 4, 2)) gfx_v9_4_2_debug_trap_config_init(adev, adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID); else @@ -4676,7 +4687,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev /* 1 - RLC_CGTT_MGCG_OVERRIDE */ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); - if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 2, 1)) data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK; data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | @@ -4710,7 +4721,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev /* 1 - MGCG_OVERRIDE */ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); - if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 2, 1)) data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK; data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | @@ -4816,7 +4827,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev /* enable cgcg FSM(0x0000363F) */ def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; else @@ -4951,7 +4962,7 @@ static int gfx_v9_0_set_powergating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_PG_STATE_GATE); - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 2, 2): case IP_VERSION(9, 1, 0): case IP_VERSION(9, 3, 0): @@ -4998,7 +5009,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 0, 1): case IP_VERSION(9, 2, 1): case IP_VERSION(9, 4, 0): @@ -5048,7 +5059,7 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u64 *flags) if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; - if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) { + if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) { /* AMD_CG_SUPPORT_GFX_3D_CGCG */ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D)); if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) @@ -6454,7 +6465,7 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, return ret; } -static const char *vml2_mems[] = { +static const char * const vml2_mems[] = { "UTC_VML2_BANK_CACHE_0_BIGK_MEM0", "UTC_VML2_BANK_CACHE_0_BIGK_MEM1", "UTC_VML2_BANK_CACHE_0_4K_MEM0", @@ -6473,7 +6484,7 @@ static const char *vml2_mems[] = { "UTC_VML2_BANK_CACHE_3_4K_MEM1", }; -static const char *vml2_walker_mems[] = { +static const char * const vml2_walker_mems[] = { "UTC_VML2_CACHE_PDE0_MEM0", "UTC_VML2_CACHE_PDE0_MEM1", "UTC_VML2_CACHE_PDE1_MEM0", @@ -6483,7 +6494,7 @@ static const char *vml2_walker_mems[] = { "UTC_VML2_RDIF_LOG_FIFO", }; -static const char *atc_l2_cache_2m_mems[] = { +static const char * const atc_l2_cache_2m_mems[] = { "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM", "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM", "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM", @@ -7087,7 +7098,7 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev) static void gfx_v9_0_set_rlc_funcs(struct 
amdgpu_device *adev) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 0, 1): case IP_VERSION(9, 2, 1): case IP_VERSION(9, 4, 0): @@ -7106,7 +7117,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev) { /* init asic gds info */ - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 0, 1): case IP_VERSION(9, 2, 1): case IP_VERSION(9, 4, 0): @@ -7128,7 +7139,7 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev) break; } - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 0, 1): case IP_VERSION(9, 4, 0): adev->gds.gds_compute_max_wave_id = 0x7ff; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index 63f6843a0..065b2bd5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -746,8 +746,6 @@ void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev, die_id); break; } - - return; } void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev, @@ -1548,8 +1546,8 @@ static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev, uint32_t ded_cnt) { uint32_t bank, way, mem; - static const char *vml2_way_str[] = { "BIGK", "4K" }; - static const char *utcl2_rounter_str[] = { "VMC", "APT" }; + static const char * const vml2_way_str[] = { "BIGK", "4K" }; + static const char * const utcl2_rounter_str[] = { "VMC", "APT" }; mem = instance % blk->num_mem_blocks; way = (instance / blk->num_mem_blocks) % blk->num_ways; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index e481ef73a..4a09cc0d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -256,6 +256,7 @@ static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring) xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0); WREG32(scratch_reg0_offset, 0xCAFEDEAD); + tmp = RREG32(scratch_reg0_offset); r = amdgpu_ring_alloc(ring, 3); if (r) @@ -623,7 +624,7 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, int num_xccs_per_xcp) { int ret, i, num_xcc; - u32 tmp = 0, regval; + u32 tmp = 0; if (adev->psp.funcs) { ret = psp_spatial_partition(&adev->psp, @@ -631,24 +632,23 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, num_xccs_per_xcp); if (ret) return ret; - } - - num_xcc = NUM_XCC(adev->gfx.xcc_mask); + } else { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); - for (i = 0; i < num_xcc; i++) { - tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP, - num_xccs_per_xcp); - tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID, - i % num_xccs_per_xcp); - regval = RREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL); - if (regval != tmp) + for (i = 0; i < num_xcc; i++) { + tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP, + num_xccs_per_xcp); + tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID, + i % num_xccs_per_xcp); WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL, tmp); + } + ret = 0; } adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp; - return 0; + return ret; } static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node) @@ -682,7 +682,7 @@ static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev) adev->gfx.funcs
= &gfx_v9_4_3_gfx_funcs; adev->gfx.ras = &gfx_v9_4_3_ras; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 4, 3): adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; @@ -1102,6 +1102,7 @@ static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX); reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT); } + adev->gfx.rlc.rlcg_reg_access_supported = true; } static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev) @@ -2430,7 +2431,7 @@ static int gfx_v9_4_3_set_clockgating_state(void *handle, return 0; num_xcc = NUM_XCC(adev->gfx.xcc_mask); - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 4, 3): for (i = 0; i < num_xcc; i++) gfx_v9_4_3_xcc_update_gfx_clock_gating( @@ -2738,16 +2739,16 @@ static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( switch (state) { case AMDGPU_IRQ_STATE_DISABLE: - mec_int_cntl = RREG32(mec_int_cntl_reg); + mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id); mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE, 0); - WREG32(mec_int_cntl_reg, mec_int_cntl); + WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id); break; case AMDGPU_IRQ_STATE_ENABLE: - mec_int_cntl = RREG32(mec_int_cntl_reg); + mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id); mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE, 1); - WREG32(mec_int_cntl_reg, mec_int_cntl); + WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id); break; default: break; @@ -3653,19 +3654,19 @@ static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = { AMDGPU_GFX_GC_CANE_MEM, 1}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI), 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"}, - AMDGPU_GFX_SPI_MEM, 8}, + AMDGPU_GFX_SPI_MEM, 1}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI), 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"}, - AMDGPU_GFX_SP_MEM, 1}, + AMDGPU_GFX_SP_MEM, 4}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI), 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"}, - AMDGPU_GFX_SP_MEM, 1}, + AMDGPU_GFX_SP_MEM, 4}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI), 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"}, - AMDGPU_GFX_SQ_MEM, 8}, + AMDGPU_GFX_SQ_MEM, 4}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI), 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"}, - AMDGPU_GFX_SQC_MEM, 8}, + AMDGPU_GFX_SQC_MEM, 4}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI), 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"}, AMDGPU_GFX_TCX_MEM, 1}, @@ -3674,22 +3675,22 @@ static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = { AMDGPU_GFX_TCC_MEM, 1}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI), 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"}, - AMDGPU_GFX_TA_MEM, 8}, + AMDGPU_GFX_TA_MEM, 4}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG), - 31, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"}, + 27, (AMDGPU_RAS_ERR_INFO_VALID | 
AMDGPU_RAS_ERR_STATUS_VALID), "TCI"}, AMDGPU_GFX_TCI_MEM, 1}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG), 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"}, - AMDGPU_GFX_TCP_MEM, 8}, + AMDGPU_GFX_TCP_MEM, 4}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI), 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"}, - AMDGPU_GFX_TD_MEM, 8}, + AMDGPU_GFX_TD_MEM, 4}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI), 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"}, AMDGPU_GFX_GCEA_MEM, 1}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI), 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"}, - AMDGPU_GFX_LDS_MEM, 1}, + AMDGPU_GFX_LDS_MEM, 4}, }; static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = { @@ -3713,19 +3714,19 @@ static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = { AMDGPU_GFX_GC_CANE_MEM, 1}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI), 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"}, - AMDGPU_GFX_SPI_MEM, 8}, + AMDGPU_GFX_SPI_MEM, 1}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI), 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"}, - AMDGPU_GFX_SP_MEM, 1}, + AMDGPU_GFX_SP_MEM, 4}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI), 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"}, - AMDGPU_GFX_SP_MEM, 1}, + AMDGPU_GFX_SP_MEM, 4}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI), 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"}, - AMDGPU_GFX_SQ_MEM, 8}, + AMDGPU_GFX_SQ_MEM, 4}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI), 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"}, - AMDGPU_GFX_SQC_MEM, 8}, + AMDGPU_GFX_SQC_MEM, 4}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI), 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"}, AMDGPU_GFX_TCX_MEM, 1}, @@ -3734,16 +3735,16 @@ static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = { AMDGPU_GFX_TCC_MEM, 1}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI), 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"}, - AMDGPU_GFX_TA_MEM, 8}, + AMDGPU_GFX_TA_MEM, 4}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG), - 31, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"}, + 27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"}, AMDGPU_GFX_TCI_MEM, 1}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG), 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"}, - AMDGPU_GFX_TCP_MEM, 8}, + AMDGPU_GFX_TCP_MEM, 4}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI), 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"}, - AMDGPU_GFX_TD_MEM, 8}, + AMDGPU_GFX_TD_MEM, 4}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI), 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"}, AMDGPU_GFX_TCA_MEM, 1}, @@ -3752,11 +3753,7 @@ static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = { AMDGPU_GFX_GCEA_MEM, 1}, {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI), 10, (AMDGPU_RAS_ERR_INFO_VALID | 
AMDGPU_RAS_ERR_STATUS_VALID), "LDS"}, - AMDGPU_GFX_LDS_MEM, 1}, -}; - -static const struct soc15_reg_entry gfx_v9_4_3_ea_err_status_regs = { - SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16 + AMDGPU_GFX_LDS_MEM, 4}, }; static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev, @@ -3766,6 +3763,12 @@ static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev, unsigned long ce_count = 0, ue_count = 0; uint32_t i, j, k; + /* NOTE: convert xcc_id to physical XCD ID (XCD0 or XCD1) */ + struct amdgpu_smuio_mcm_config_info mcm_info = { + .socket_id = adev->smuio.funcs->get_socket_id(adev), + .die_id = xcc_id & 0x01 ? 1 : 0, + }; + mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) { @@ -3797,6 +3800,27 @@ static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev, } } + /* handle extra register entries of UE */ + for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) { + for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) { + for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) { + /* no need to select if instance number is 1 */ + if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 || + gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1) + gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); + + amdgpu_ras_inst_query_ras_error_count(adev, + &(gfx_v9_4_3_ue_reg_list[i].reg_entry), + 1, + gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent, + gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size, + GET_INST(GC, xcc_id), + AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, + &ue_count); + } + } + } + gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id); mutex_unlock(&adev->grbm_idx_mutex); @@ -3804,8 +3828,8 @@ static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev, /* the caller should make sure initialize value of * err_data->ue_count and err_data->ce_count */ - err_data->ce_count += ce_count; - err_data->ue_count += ue_count; + amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count); + amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count); } static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev, @@ -3836,36 +3860,20 @@ static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev, } } - gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, - xcc_id); - mutex_unlock(&adev->grbm_idx_mutex); -} - -static void gfx_v9_4_3_inst_query_ea_err_status(struct amdgpu_device *adev, - int xcc_id) -{ - uint32_t i, j; - uint32_t reg_value; - - mutex_lock(&adev->grbm_idx_mutex); + /* handle extra register entries of UE */ + for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) { + for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) { + for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) { + /* no need to select if instance number is 1 */ + if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 || + gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1) + gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); - for (i = 0; i < gfx_v9_4_3_ea_err_status_regs.se_num; i++) { - for (j = 0; j < gfx_v9_4_3_ea_err_status_regs.instance; j++) { - gfx_v9_4_3_xcc_select_se_sh(adev, i, 0, j, xcc_id); - reg_value = RREG32_SOC15(GC, GET_INST(GC, xcc_id), - regGCEA_ERR_STATUS); - if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) || - REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) || - REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) { - 
dev_warn(adev->dev, - "GCEA err detected at instance: %d, status: 0x%x!\n", - j, reg_value); + amdgpu_ras_inst_reset_ras_error_count(adev, + &(gfx_v9_4_3_ue_reg_list[i].reg_entry), + 1, + GET_INST(GC, xcc_id)); } - /* clear after read */ - reg_value = REG_SET_FIELD(reg_value, GCEA_ERR_STATUS, - CLEAR_ERROR_STATUS, 0x1); - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGCEA_ERR_STATUS, - reg_value); } } @@ -3978,7 +3986,6 @@ static void gfx_v9_4_3_inst_query_sq_timeout_status(struct amdgpu_device *adev, static void gfx_v9_4_3_inst_query_ras_err_status(struct amdgpu_device *adev, void *ras_error_status, int xcc_id) { - gfx_v9_4_3_inst_query_ea_err_status(adev, xcc_id); gfx_v9_4_3_inst_query_utc_err_status(adev, xcc_id); gfx_v9_4_3_inst_query_sq_timeout_status(adev, xcc_id); } @@ -3991,27 +3998,6 @@ static void gfx_v9_4_3_inst_reset_utc_err_status(struct amdgpu_device *adev, WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS, 0x3); } -static void gfx_v9_4_3_inst_reset_ea_err_status(struct amdgpu_device *adev, - int xcc_id) -{ - uint32_t i, j; - uint32_t value; - - mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < gfx_v9_4_3_ea_err_status_regs.se_num; i++) { - for (j = 0; j < gfx_v9_4_3_ea_err_status_regs.instance; j++) { - gfx_v9_4_3_xcc_select_se_sh(adev, i, 0, j, xcc_id); - value = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGCEA_ERR_STATUS); - value = REG_SET_FIELD(value, GCEA_ERR_STATUS, - CLEAR_ERROR_STATUS, 0x1); - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGCEA_ERR_STATUS, value); - } - } - gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, - xcc_id); - mutex_unlock(&adev->grbm_idx_mutex); -} - static void gfx_v9_4_3_inst_reset_sq_timeout_status(struct amdgpu_device *adev, int xcc_id) { @@ -4037,7 +4023,6 @@ static void gfx_v9_4_3_inst_reset_ras_err_status(struct amdgpu_device *adev, void *ras_error_status, int xcc_id) { gfx_v9_4_3_inst_reset_utc_err_status(adev, xcc_id); - gfx_v9_4_3_inst_reset_ea_err_status(adev, xcc_id); gfx_v9_4_3_inst_reset_sq_timeout_status(adev, xcc_id); } @@ -4231,7 +4216,7 @@ static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev) static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev) { /* init asic gds info */ - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 4, 3): /* 9.4.3 removed all the GDS internal memory, * only support GWS opcode in kernel, like barrier @@ -4243,7 +4228,7 @@ static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev) break; } - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 4, 3): /* deprecated for 9.4.3, no usage at all */ adev->gds.gds_compute_max_wave_id = 0; @@ -4354,7 +4339,7 @@ const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = { .type = AMD_IP_BLOCK_TYPE_GFX, .major = 9, .minor = 4, - .rev = 0, + .rev = 3, .funcs = &gfx_v9_4_3_ip_funcs, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v11_5_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v11_5_0.c new file mode 100644 index 000000000..f9949fedf --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v11_5_0.c @@ -0,0 +1,516 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "gfxhub_v11_5_0.h" + +#include "gc/gc_11_5_0_offset.h" +#include "gc/gc_11_5_0_sh_mask.h" + +#include "navi10_enum.h" +#include "soc15_common.h" + +#define regGCVM_L2_CNTL3_DEFAULT 0x80100007 +#define regGCVM_L2_CNTL4_DEFAULT 0x000000c1 +#define regGCVM_L2_CNTL5_DEFAULT 0x00003fe0 + + +static const char *gfxhub_client_ids[] = { + "CB/DB", + "Reserved", + "GE1", + "GE2", + "CPF", + "CPC", + "CPG", + "RLC", + "TCP", + "SQC (inst)", + "SQC (data)", + "SQG", + "Reserved", + "SDMA0", + "SDMA1", + "GCR", + "SDMA2", + "SDMA3", +}; + +static uint32_t gfxhub_v11_5_0_get_invalidate_req(unsigned int vmid, + uint32_t flush_type) +{ + u32 req = 0; + + /* invalidate using legacy mode on vmid*/ + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, + PER_VMID_INVALIDATE_REQ, 1 << vmid); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, + CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0); + + return req; +} + +static void +gfxhub_v11_5_0_print_l2_protection_fault_status(struct amdgpu_device *adev, + uint32_t status) +{ + u32 cid = REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, CID); + + dev_err(adev->dev, + "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", + status); + dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", + cid >= ARRAY_SIZE(gfxhub_client_ids) ? 
"unknown" : gfxhub_client_ids[cid], + cid); + dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", + REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS)); + dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", + REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR)); + dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", + REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS)); + dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", + REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR)); + dev_err(adev->dev, "\t RW: 0x%lx\n", + REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, RW)); +} + +static u64 gfxhub_v11_5_0_get_fb_location(struct amdgpu_device *adev) +{ + u64 base = RREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_BASE); + + base &= GCMC_VM_FB_LOCATION_BASE__FB_BASE_MASK; + base <<= 24; + + return base; +} + +static u64 gfxhub_v11_5_0_get_mc_fb_offset(struct amdgpu_device *adev) +{ + return (u64)RREG32_SOC15(GC, 0, regGCMC_VM_FB_OFFSET) << 24; +} + +static void gfxhub_v11_5_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; + + WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + hub->ctx_addr_distance * vmid, + lower_32_bits(page_table_base)); + + WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + hub->ctx_addr_distance * vmid, + upper_32_bits(page_table_base)); +} + +static void gfxhub_v11_5_0_init_gart_aperture_regs(struct amdgpu_device *adev) +{ + uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + + gfxhub_v11_5_0_setup_vm_pt_regs(adev, 0, pt_base); + + WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.gart_start >> 12)); + WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.gart_start >> 44)); + + WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + (u32)(adev->gmc.gart_end >> 12)); + WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); +} + +static void gfxhub_v11_5_0_init_system_aperture_regs(struct amdgpu_device *adev) +{ + uint64_t value; + + WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BASE, 0); + WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + + /* Program the system aperture low logical page number. */ + WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR, + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); + + WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); + + /* Set default page address. */ + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); + WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, + (u32)(value >> 12)); + WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, + (u32)(value >> 44)); + + /* Program "protection fault". 
*/ + WREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, + (u32)(adev->dummy_page_addr >> 12)); + WREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, + (u32)((u64)adev->dummy_page_addr >> 44)); + + WREG32_FIELD15_PREREG(GC, 0, GCVM_L2_PROTECTION_FAULT_CNTL2, + ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); +} + +static void gfxhub_v11_5_0_init_tlb_regs(struct amdgpu_device *adev) +{ + uint32_t tmp; + + /* Setup TLB control */ + tmp = RREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL); + + tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); + tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3); + tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 1); + tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, + SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); + tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); + tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, + MTYPE, MTYPE_UC); /* UC, uncached */ + + WREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, tmp); +} + +static void gfxhub_v11_5_0_init_cache_regs(struct amdgpu_device *adev) +{ + uint32_t tmp; + + /* These registers are not accessible to VF-SRIOV. + * The PF will program them instead. + */ + if (amdgpu_sriov_vf(adev)) + return; + + /* Setup L2 cache */ + tmp = RREG32_SOC15(GC, 0, regGCVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, + ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1); + /* XXX for emulation, Refer to closed source code.*/ + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, + L2_PDE0_CACHE_TAG_GENERATION_MODE, 0); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0); + WREG32_SOC15(GC, 0, regGCVM_L2_CNTL, tmp); + + tmp = RREG32_SOC15(GC, 0, regGCVM_L2_CNTL2); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); + WREG32_SOC15(GC, 0, regGCVM_L2_CNTL2, tmp); + + tmp = regGCVM_L2_CNTL3_DEFAULT; + if (adev->gmc.translate_further) { + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 9); + } else { + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 6); + } + WREG32_SOC15(GC, 0, regGCVM_L2_CNTL3, tmp); + + tmp = regGCVM_L2_CNTL4_DEFAULT; + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0); + WREG32_SOC15(GC, 0, regGCVM_L2_CNTL4, tmp); + + tmp = regGCVM_L2_CNTL5_DEFAULT; + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0); + WREG32_SOC15(GC, 0, regGCVM_L2_CNTL5, tmp); +} + +static void gfxhub_v11_5_0_enable_system_domain(struct amdgpu_device *adev) +{ + uint32_t tmp; + + tmp = RREG32_SOC15(GC, 0, regGCVM_CONTEXT0_CNTL); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); + WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_CNTL, tmp); +} + +static void gfxhub_v11_5_0_disable_identity_aperture(struct amdgpu_device *adev) +{ + /* These 
registers are not accessible to VF-SRIOV. + * The PF will program them instead. + */ + if (amdgpu_sriov_vf(adev)) + return; + + WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, + 0xFFFFFFFF); + WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32, + 0x0000000F); + + WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, + 0); + WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, + 0); + + WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0); + WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0); + +} + +static void gfxhub_v11_5_0_setup_vmid_config(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; + int i; + uint32_t tmp; + + for (i = 0; i <= 14; i++) { + tmp = RREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_CNTL, i * hub->ctx_distance); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, + adev->vm_manager.num_level); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, + PAGE_TABLE_BLOCK_SIZE, + adev->vm_manager.block_size - 9); + /* Send no-retry XNACK on fault to suppress VM fault storm. */ + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, + !amdgpu_noretry); + WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_CNTL, + i * hub->ctx_distance, tmp); + WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, + i * hub->ctx_addr_distance, + lower_32_bits(adev->vm_manager.max_pfn - 1)); + WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, + i * hub->ctx_addr_distance, + upper_32_bits(adev->vm_manager.max_pfn - 1)); + } + + hub->vm_cntx_cntl = tmp; +} + +static void gfxhub_v11_5_0_program_invalidation(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; + unsigned i; + + for (i = 0 ; i < 18; ++i) { + WREG32_SOC15_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, + i * hub->eng_addr_distance, 0xffffffff); + WREG32_SOC15_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, + i * hub->eng_addr_distance, 0x1f); + } +} + +static int gfxhub_v11_5_0_gart_enable(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev)) { + /* + * GCMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are + * VF copy registers, so the vbios post doesn't program them; for + * SRIOV the driver needs to program them itself + */ + WREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_BASE, + adev->gmc.vram_start >> 24); + WREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_TOP, + adev->gmc.vram_end >> 24); + } + + /* GART Enable.
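+ * Ordering note: apertures are programmed first, then the TLB and the
+ * L2 cache, then the system domain and per-VMID contexts, and finally
+ * the invalidation engine address ranges, matching the calls below.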
*/ + gfxhub_v11_5_0_init_gart_aperture_regs(adev); + gfxhub_v11_5_0_init_system_aperture_regs(adev); + gfxhub_v11_5_0_init_tlb_regs(adev); + gfxhub_v11_5_0_init_cache_regs(adev); + + gfxhub_v11_5_0_enable_system_domain(adev); + gfxhub_v11_5_0_disable_identity_aperture(adev); + gfxhub_v11_5_0_setup_vmid_config(adev); + gfxhub_v11_5_0_program_invalidation(adev); + + return 0; +} + +static void gfxhub_v11_5_0_gart_disable(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; + u32 tmp; + u32 i; + + /* Disable all tables */ + for (i = 0; i < 16; i++) + WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_CNTL, + i * hub->ctx_distance, 0); + + /* Setup TLB control */ + tmp = RREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL); + tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); + tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 0); + WREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, tmp); + + /* Setup L2 cache */ + WREG32_FIELD15_PREREG(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0); + WREG32_SOC15(GC, 0, regGCVM_L2_CNTL3, 0); +} + +/** + * gfxhub_v11_5_0_set_fault_enable_default - update GART/VM fault handling + * + * @adev: amdgpu_device pointer + * @value: true redirects VM faults to the default page + */ +static void gfxhub_v11_5_0_set_fault_enable_default(struct amdgpu_device *adev, + bool value) +{ + u32 tmp; + + /* NO halt CP when page fault */ + tmp = RREG32_SOC15(GC, 0, regCP_DEBUG); + tmp = REG_SET_FIELD(tmp, CP_DEBUG, CPG_UTCL1_ERROR_HALT_DISABLE, 1); + WREG32_SOC15(GC, 0, regCP_DEBUG, tmp); + + /* These registers are not accessible to VF-SRIOV. + * The PF will program them instead. + */ + if (amdgpu_sriov_vf(adev)) + return; + + tmp = RREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + if (!value) { + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_NO_RETRY_FAULT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_RETRY_FAULT, 1); + } + WREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL, tmp); +} + +static const struct amdgpu_vmhub_funcs gfxhub_v11_5_0_vmhub_funcs = { + .print_l2_protection_fault_status = gfxhub_v11_5_0_print_l2_protection_fault_status, + .get_invalidate_req = gfxhub_v11_5_0_get_invalidate_req, +}; + +static void gfxhub_v11_5_0_init(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub 
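A note on gfxhub_v11_5_0_set_fault_enable_default() above: every *_PROTECTION_FAULT_ENABLE_DEFAULT field is driven from the one bool, and the crash-on-fault bits are armed only when faults are not absorbed by the dummy page. A compact sketch of that shape, with bit positions invented for illustration (the real ones live in the GCVM_L2_PROTECTION_FAULT_CNTL header):

#include <stdbool.h>
#include <stdint.h>

enum {
	RANGE_FAULT_BIT		= 0,	/* invented positions */
	PDE0_FAULT_BIT		= 1,
	VALID_FAULT_BIT		= 2,
	CRASH_ON_NO_RETRY_BIT	= 30,
	CRASH_ON_RETRY_BIT	= 31,
};

static uint32_t fault_cntl(uint32_t reg, bool redirect_to_dummy_page)
{
	const int bits[] = { RANGE_FAULT_BIT, PDE0_FAULT_BIT, VALID_FAULT_BIT };
	uint32_t v = redirect_to_dummy_page ? 1u : 0u;
	unsigned int i;

	/* every *_ENABLE_DEFAULT field tracks the same bool */
	for (i = 0; i < sizeof(bits) / sizeof(bits[0]); i++)
		reg = (reg & ~(1u << bits[i])) | (v << bits[i]);

	/* with no dummy-page redirect, faults become fatal for debugging */
	if (!redirect_to_dummy_page)
		reg |= (1u << CRASH_ON_NO_RETRY_BIT) |
		       (1u << CRASH_ON_RETRY_BIT);
	return reg;
}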
*hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; + + hub->ctx0_ptb_addr_lo32 = + SOC15_REG_OFFSET(GC, 0, + regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32); + hub->ctx0_ptb_addr_hi32 = + SOC15_REG_OFFSET(GC, 0, + regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_SEM); + hub->vm_inv_eng0_req = + SOC15_REG_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_REQ); + hub->vm_inv_eng0_ack = + SOC15_REG_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ACK); + hub->vm_context0_cntl = + SOC15_REG_OFFSET(GC, 0, regGCVM_CONTEXT0_CNTL); + hub->vm_l2_pro_fault_status = + SOC15_REG_OFFSET(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS); + hub->vm_l2_pro_fault_cntl = + SOC15_REG_OFFSET(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL); + + hub->ctx_distance = regGCVM_CONTEXT1_CNTL - regGCVM_CONTEXT0_CNTL; + hub->ctx_addr_distance = regGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - + regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + hub->eng_distance = regGCVM_INVALIDATE_ENG1_REQ - + regGCVM_INVALIDATE_ENG0_REQ; + hub->eng_addr_distance = regGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 - + regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; + + hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK; + + hub->vmhub_funcs = &gfxhub_v11_5_0_vmhub_funcs; +} + +const struct amdgpu_gfxhub_funcs gfxhub_v11_5_0_funcs = { + .get_fb_location = gfxhub_v11_5_0_get_fb_location, + .get_mc_fb_offset = gfxhub_v11_5_0_get_mc_fb_offset, + .setup_vm_pt_regs = gfxhub_v11_5_0_setup_vm_pt_regs, + .gart_enable = gfxhub_v11_5_0_gart_enable, + .gart_disable = gfxhub_v11_5_0_gart_disable, + .set_fault_enable_default = gfxhub_v11_5_0_set_fault_enable_default, + .init = gfxhub_v11_5_0_init, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v11_5_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v11_5_0.h new file mode 100644 index 000000000..265ab631b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v11_5_0.h @@ -0,0 +1,29 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __GFXHUB_V11_5_0_H__ +#define __GFXHUB_V11_5_0_H__ + +extern const struct amdgpu_gfxhub_funcs gfxhub_v11_5_0_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index cdc290a47..22175da0e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -102,7 +102,9 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); - if (adev->apu_flags & AMD_APU_IS_RAVEN2) + if (adev->apu_flags & (AMD_APU_IS_RAVEN2 | + AMD_APU_IS_RENOIR | + AMD_APU_IS_GREEN_SARDINE)) /* * Raven2 has a HW issue that it is unable to use the * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. @@ -260,7 +262,7 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) block_size -= 9; for (i = 0; i <= 14; i++) { - tmp = RREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i); + tmp = RREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i * hub->ctx_distance); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, num_level); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index 0834af771..95d06da54 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -139,7 +139,9 @@ gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev, WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR, min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); - if (adev->apu_flags & AMD_APU_IS_RAVEN2) + if (adev->apu_flags & (AMD_APU_IS_RAVEN2 | + AMD_APU_IS_RENOIR | + AMD_APU_IS_GREEN_SARDINE)) /* * Raven2 has a HW issue that it is unable to use the * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. @@ -329,7 +331,8 @@ static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev, for_each_inst(j, xcc_mask) { hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; for (i = 0; i <= 14; i++) { - tmp = RREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL, i); + tmp = RREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL, + i * hub->ctx_distance); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, num_level); @@ -356,11 +359,14 @@ static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev, * the SQ per-process. * Retry faults need to be enabled for that to work. 
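The hunk that follows keeps retry faults enabled in two cases; restated as a standalone predicate, where the (major, minor, rev) triple stands in for the amdgpu_ip_version() lookup:

#include <stdbool.h>

/* Retry faults stay enabled unless the user forced noretry, and are
 * kept on regardless for GC 9.4.2 / 9.4.3, where the SQ needs them
 * for per-process debugging (per the comment above). */
static bool want_retry_faults(bool noretry, int major, int minor, int rev)
{
	bool gc_9_4_2 = major == 9 && minor == 4 && rev == 2;
	bool gc_9_4_3 = major == 9 && minor == 4 && rev == 3;

	return !noretry || gc_9_4_2 || gc_9_4_3;
}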
*/ - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, - !adev->gmc.noretry || - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)); + tmp = REG_SET_FIELD( + tmp, VM_CONTEXT1_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, + !adev->gmc.noretry || + amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(9, 4, 2) || + amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(9, 4, 3)); WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL, i * hub->ctx_distance, tmp); WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index a041c6c97..793faf62c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -287,7 +287,7 @@ static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) uint32_t tmp; for (i = 0; i <= 14; i++) { - tmp = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL, i); + tmp = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL, i * hub->ctx_distance); tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, adev->vm_manager.num_level); @@ -471,6 +471,9 @@ static void gfxhub_v2_0_init(struct amdgpu_device *adev) GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK; + /* TODO: This is only needed on some Navi 1x revisions */ + hub->sdma_invalidation_workaround = true; + hub->vmhub_funcs = &gfxhub_v2_0_vmhub_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c index 7708d5ded..cd0e8a321 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c @@ -296,7 +296,7 @@ static void gfxhub_v2_1_setup_vmid_config(struct amdgpu_device *adev) uint32_t tmp; for (i = 0; i <= 14; i++) { - tmp = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL, i); + tmp = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL, i * hub->ctx_distance); tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, adev->vm_manager.num_level); @@ -510,7 +510,7 @@ static int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev) u32 max_num_physical_nodes = 0; u32 max_physical_node_id = 0; - switch (adev->ip_versions[XGMI_HWIP][0]) { + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { case IP_VERSION(4, 8, 0): max_num_physical_nodes = 4; max_physical_node_id = 3; @@ -548,7 +548,7 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev) adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines); - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 3, 1): case IP_VERSION(10, 3, 3): /* Get SA disabled bitmap from eFuse setting */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c index e1c76c070..abe30c8bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c @@ -164,8 +164,7 @@ static void gfxhub_v3_0_init_system_aperture_regs(struct amdgpu_device *adev) max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); /* Set default page address. 
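The replacement just below swaps open-coded address arithmetic for amdgpu_gmc_vram_mc2pa(). Judging only from the expression being deleted, the helper translates an MC (GPU-visible) VRAM address into the physical address the aperture registers expect; a sketch under that assumption:

#include <stdint.h>

/* Equivalence inferred purely from the deleted expression:
 * pa = mc_addr - vram_start + vram_base_offset. */
static inline uint64_t vram_mc2pa(uint64_t mc_addr, uint64_t vram_start,
				  uint64_t vram_base_offset)
{
	return mc_addr - vram_start + vram_base_offset;
}

/* The adjacent WREG32 then stores pa >> 12, i.e. a 4K page number,
 * into the DEFAULT_ADDR_LSB register. */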
*/ - value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start - + adev->vm_manager.vram_base_offset; + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, @@ -295,7 +294,7 @@ static void gfxhub_v3_0_setup_vmid_config(struct amdgpu_device *adev) uint32_t tmp; for (i = 0; i <= 14; i++) { - tmp = RREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_CNTL, i); + tmp = RREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_CNTL, i * hub->ctx_distance); tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, adev->vm_manager.num_level); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c index 07f369c7a..b3ef6e718 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c @@ -169,8 +169,7 @@ static void gfxhub_v3_0_3_init_system_aperture_regs(struct amdgpu_device *adev) max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); /* Set default page address. */ - value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start - + adev->vm_manager.vram_base_offset; + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, @@ -300,7 +299,7 @@ static void gfxhub_v3_0_3_setup_vmid_config(struct amdgpu_device *adev) uint32_t tmp; for (i = 0; i <= 14; i++) { - tmp = RREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_CNTL, i); + tmp = RREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_CNTL, i * hub->ctx_distance); tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, adev->vm_manager.num_level); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index fa87a85e1..6c5185608 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -51,8 +51,6 @@ #include "athub_v2_0.h" #include "athub_v2_1.h" -#include "amdgpu_reset.h" - static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned int type, @@ -145,11 +143,15 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, * the new fast GRBM interface. */ if ((entry->vmid_src == AMDGPU_GFXHUB(0)) && - (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0))) + (amdgpu_ip_version(adev, GC_HWIP, 0) < + IP_VERSION(10, 3, 0))) RREG32(hub->vm_l2_pro_fault_status); status = RREG32(hub->vm_l2_pro_fault_status); WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); + + amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, + entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0)); } if (!printk_ratelimit()) @@ -230,20 +232,47 @@ static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info( * by the amdgpu vm/hsa code. */ -static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, - unsigned int vmhub, uint32_t flush_type) +/** + * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback + * + * @adev: amdgpu_device pointer + * @vmid: vm instance to flush + * @vmhub: vmhub type + * @flush_type: the flush type + * + * Flush the TLB for the requested page table. 
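In the rewritten flush path below, the three per-engine invalidation registers are computed once up front instead of inline at every access; each is just a base offset plus eng_distance times the engine index. A standalone restatement:

#include <stdint.h>

struct vmhub_regs {
	uint32_t inv_eng0_sem;
	uint32_t inv_eng0_req;
	uint32_t inv_eng0_ack;
	uint32_t eng_distance;
};

/* Engine 17 is the one reserved for GART flushes in the code below. */
static void compute_flush_regs(const struct vmhub_regs *hub,
			       unsigned int eng, uint32_t *sem,
			       uint32_t *req, uint32_t *ack)
{
	*sem = hub->inv_eng0_sem + hub->eng_distance * eng;
	*req = hub->inv_eng0_req + hub->eng_distance * eng;
	*ack = hub->inv_eng0_ack + hub->eng_distance * eng;
}

Hoisting the three sums is what lets the rewritten function collapse the long RREG32/WREG32 argument expressions into plain sem/req/ack.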
+ */ +static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, + uint32_t vmhub, uint32_t flush_type) { bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub); struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type); - u32 tmp; /* Use register 17 for GART */ const unsigned int eng = 17; - unsigned int i; unsigned char hub_ip = 0; + u32 sem, req, ack; + unsigned int i; + u32 tmp; + + sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng; + req = hub->vm_inv_eng0_req + hub->eng_distance * eng; + ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; - hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? - GC_HWIP : MMHUB_HWIP; + /* flush hdp cache */ + adev->hdp.funcs->flush_hdp(adev, NULL); + + /* For SRIOV run time, driver shouldn't access the register through MMIO + * Directly use kiq to do the vm invalidation instead + */ + if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes && + (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { + amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, + 1 << vmid, GET_INST(GC, 0)); + return; + } + + hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP; spin_lock(&adev->gmc.invalidate_lock); /* @@ -257,9 +286,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, if (use_semaphore) { for (i = 0; i < adev->usec_timeout; i++) { /* a read return value of 1 means semaphore acuqire */ - tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem + - hub->eng_distance * eng, hub_ip); - + tmp = RREG32_RLC_NO_KIQ(sem, hub_ip); if (tmp & 0x1) break; udelay(1); @@ -269,24 +296,19 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); } - WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + - hub->eng_distance * eng, - inv_req, hub_ip); + WREG32_RLC_NO_KIQ(req, inv_req, hub_ip); /* * Issue a dummy read to wait for the ACK register to be cleared * to avoid a false ACK due to the new fast GRBM interface. */ if ((vmhub == AMDGPU_GFXHUB(0)) && - (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0))) - RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + - hub->eng_distance * eng, hub_ip); + (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 3, 0))) + RREG32_RLC_NO_KIQ(req, hub_ip); /* Wait for ACK with a delay.*/ for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack + - hub->eng_distance * eng, hub_ip); - + tmp = RREG32_RLC_NO_KIQ(ack, hub_ip); tmp &= 1 << vmid; if (tmp) break; @@ -296,109 +318,13 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ if (use_semaphore) - /* - * add semaphore release after invalidation, - * write with 0 means semaphore release - */ - WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem + - hub->eng_distance * eng, 0, hub_ip); + WREG32_RLC_NO_KIQ(sem, 0, hub_ip); spin_unlock(&adev->gmc.invalidate_lock); - if (i < adev->usec_timeout) - return; - - DRM_ERROR("Timeout waiting for VM flush hub: %d!\n", vmhub); -} - -/** - * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback - * - * @adev: amdgpu_device pointer - * @vmid: vm instance to flush - * @vmhub: vmhub type - * @flush_type: the flush type - * - * Flush the TLB for the requested page table. 
- */ -static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, - uint32_t vmhub, uint32_t flush_type) -{ - struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; - struct dma_fence *fence; - struct amdgpu_job *job; - - int r; - - /* flush hdp cache */ - adev->hdp.funcs->flush_hdp(adev, NULL); - - /* For SRIOV run time, driver shouldn't access the register through MMIO - * Directly use kiq to do the vm invalidation instead - */ - if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes && - (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && - down_read_trylock(&adev->reset_domain->sem)) { - struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; - const unsigned int eng = 17; - u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type); - u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng; - u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; - - amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, - 1 << vmid); - - up_read(&adev->reset_domain->sem); - return; - } - - mutex_lock(&adev->mman.gtt_window_lock); - - if (vmhub == AMDGPU_MMHUB0(0)) { - gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB0(0), 0); - mutex_unlock(&adev->mman.gtt_window_lock); - return; - } - - BUG_ON(vmhub != AMDGPU_GFXHUB(0)); - - if (!adev->mman.buffer_funcs_enabled || - !adev->ib_pool_ready || - amdgpu_in_reset(adev) || - ring->sched.ready == false) { - gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB(0), 0); - mutex_unlock(&adev->mman.gtt_window_lock); - return; - } - - /* The SDMA on Navi has a bug which can theoretically result in memory - * corruption if an invalidation happens at the same time as an VA - * translation. Avoid this by doing the invalidation from the SDMA - * itself. - */ - r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr, - AMDGPU_FENCE_OWNER_UNDEFINED, - 16 * 4, AMDGPU_IB_POOL_IMMEDIATE, - &job); - if (r) - goto error_alloc; - - job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo); - job->vm_needs_flush = true; - job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop; - amdgpu_ring_pad_ib(ring, &job->ibs[0]); - fence = amdgpu_job_submit(job); - - mutex_unlock(&adev->mman.gtt_window_lock); - - dma_fence_wait(fence, false); - dma_fence_put(fence); - - return; - -error_alloc: - mutex_unlock(&adev->mman.gtt_window_lock); - DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r); + if (i >= adev->usec_timeout) + dev_err(adev->dev, "Timeout waiting for VM flush hub: %d!\n", + vmhub); } /** @@ -412,62 +338,31 @@ error_alloc: * * Flush the TLB for the requested pasid. */ -static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, - uint16_t pasid, uint32_t flush_type, - bool all_hub, uint32_t inst) +static void gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, + uint16_t pasid, uint32_t flush_type, + bool all_hub, uint32_t inst) { + uint16_t queried; int vmid, i; - signed long r; - uint32_t seq; - uint16_t queried_pasid; - bool ret; - u32 usec_timeout = amdgpu_sriov_vf(adev) ? 
SRIOV_USEC_TIMEOUT : adev->usec_timeout; - struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; - - if (amdgpu_emu_mode == 0 && ring->sched.ready) { - spin_lock(&adev->gfx.kiq[0].ring_lock); - /* 2 dwords flush + 8 dwords fence */ - amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8); - kiq->pmf->kiq_invalidate_tlbs(ring, - pasid, flush_type, all_hub); - r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); - if (r) { - amdgpu_ring_undo(ring); - spin_unlock(&adev->gfx.kiq[0].ring_lock); - return -ETIME; - } - - amdgpu_ring_commit(ring); - spin_unlock(&adev->gfx.kiq[0].ring_lock); - r = amdgpu_fence_wait_polling(ring, seq, usec_timeout); - if (r < 1) { - dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); - return -ETIME; - } - - return 0; - } for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) { - - ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid, - &queried_pasid); - if (ret && queried_pasid == pasid) { - if (all_hub) { - for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) - gmc_v10_0_flush_gpu_tlb(adev, vmid, - i, flush_type); - } else { - gmc_v10_0_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB(0), flush_type); - } - if (!adev->enable_mes) - break; + bool valid; + + valid = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid, + &queried); + if (!valid || queried != pasid) + continue; + + if (all_hub) { + for_each_set_bit(i, adev->vmhubs_mask, + AMDGPU_MAX_VMHUBS) + gmc_v10_0_flush_gpu_tlb(adev, vmid, i, + flush_type); + } else { + gmc_v10_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0), + flush_type); } } - - return 0; } static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, @@ -634,6 +529,7 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev, } if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT | + AMDGPU_GEM_CREATE_EXT_COHERENT | AMDGPU_GEM_CREATE_UNCACHED)) *flags = (*flags & ~AMDGPU_PTE_MTYPE_NV10_MASK) | AMDGPU_PTE_MTYPE_NV10(MTYPE_UC); @@ -680,7 +576,7 @@ static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev) static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev) { - switch (adev->ip_versions[UMC_HWIP][0]) { + switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) { case IP_VERSION(8, 7, 0): adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM; adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM; @@ -697,7 +593,7 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev) static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev) { - switch (adev->ip_versions[MMHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { case IP_VERSION(2, 3, 0): case IP_VERSION(2, 4, 0): case IP_VERSION(2, 4, 1): @@ -711,7 +607,7 @@ static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev) static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 3, 0): case IP_VERSION(10, 3, 2): case IP_VERSION(10, 3, 1): @@ -776,9 +672,11 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev, /* add the xgmi offset of the physical node */ base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; + amdgpu_gmc_set_agp_default(adev, mc); amdgpu_gmc_vram_location(adev, &adev->gmc, base); - amdgpu_gmc_gart_location(adev, mc); - amdgpu_gmc_agp_location(adev, mc); + amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT); + if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1)) + 
amdgpu_gmc_agp_location(adev, mc); /* base offset of vram pages */ adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev); @@ -825,7 +723,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev) /* set the gart size */ if (amdgpu_gart_size == -1) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { default: adev->gmc.gart_size = 512ULL << 20; break; @@ -892,7 +790,7 @@ static int gmc_v10_0_sw_init(void *handle) adev->gmc.vram_vendor = vram_vendor; } - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 3, 0): adev->gmc.mall_size = 128 * 1024 * 1024; break; @@ -910,7 +808,7 @@ static int gmc_v10_0_sw_init(void *handle) break; } - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 1, 10): case IP_VERSION(10, 1, 1): case IP_VERSION(10, 1, 2): @@ -1084,8 +982,10 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) static int gmc_v10_0_hw_init(void *handle) { - int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode; /* The sequence of these two function calls matters.*/ gmc_v10_0_init_golden_registers(adev); @@ -1141,6 +1041,10 @@ static int gmc_v10_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); + if (adev->gmc.ecc_irq.funcs && + amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) + amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); + return 0; } @@ -1195,7 +1099,8 @@ static int gmc_v10_0_set_clockgating_state(void *handle, * is a new problem observed at DF 3.0.3, however with the same suspend sequence not * seen any issue on the DF 3.0.2 series platform. */ - if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) { + if (adev->in_s0ix && + amdgpu_ip_version(adev, DF_HWIP, 0) > IP_VERSION(3, 0, 2)) { dev_dbg(adev->dev, "keep mmhub clock gating being enabled for s0ix\n"); return 0; } @@ -1204,7 +1109,7 @@ static int gmc_v10_0_set_clockgating_state(void *handle, if (r) return r; - if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0)) + if (amdgpu_ip_version(adev, ATHUB_HWIP, 0) >= IP_VERSION(2, 1, 0)) return athub_v2_1_set_clockgating(adev, state); else return athub_v2_0_set_clockgating(adev, state); @@ -1214,13 +1119,13 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u64 *flags) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3) || - adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 4)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 3) || + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 4)) return; adev->mmhub.funcs->get_clockgating(adev, flags); - if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0)) + if (amdgpu_ip_version(adev, ATHUB_HWIP, 0) >= IP_VERSION(2, 1, 0)) athub_v2_1_get_clockgating(adev, flags); else athub_v2_0_get_clockgating(adev, flags); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index e3b76fd28..c9c653cfc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -42,9 +42,11 @@ #include "nbio_v4_3.h" #include "gfxhub_v3_0.h" #include "gfxhub_v3_0_3.h" +#include "gfxhub_v11_5_0.h" #include "mmhub_v3_0.h" #include "mmhub_v3_0_1.h" #include "mmhub_v3_0_2.h" +#include "mmhub_v3_3.h" #include "athub_v3_0.h" @@ -71,7 +73,8 @@ 
gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * fini/suspend, so the overall state doesn't * change over the course of suspend/resume. */ - if (!adev->in_s0ix) + if (!adev->in_s0ix && (adev->in_runpm || adev->in_suspend || + amdgpu_in_reset(adev))) amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false); break; case AMDGPU_IRQ_STATE_ENABLE: @@ -117,6 +120,9 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev, status = RREG32(hub->vm_l2_pro_fault_status); WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); + + amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, + entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0)); } if (printk_ratelimit()) { @@ -184,27 +190,50 @@ static bool gmc_v11_0_get_vmid_pasid_mapping_info( return !!(*p_pasid); } -/* - * GART - * VMID 0 is the physical GPU addresses as used by the kernel. - * VMIDs 1-15 are used for userspace clients and are handled - * by the amdgpu vm/hsa code. +/** + * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback + * + * @adev: amdgpu_device pointer + * @vmid: vm instance to flush + * @vmhub: which hub to flush + * @flush_type: the flush type + * + * Flush the TLB for the requested page table. */ - -static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, - unsigned int vmhub, uint32_t flush_type) +static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, + uint32_t vmhub, uint32_t flush_type) { bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub); struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type); - u32 tmp; /* Use register 17 for GART */ const unsigned int eng = 17; + unsigned char hub_ip; + u32 sem, req, ack; unsigned int i; - unsigned char hub_ip = 0; + u32 tmp; + + if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron) + return; + + sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng; + req = hub->vm_inv_eng0_req + hub->eng_distance * eng; + ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; + + /* flush hdp cache */ + adev->hdp.funcs->flush_hdp(adev, NULL); + + /* For SRIOV run time, driver shouldn't access the register through MMIO + * Directly use kiq to do the vm invalidation instead + */ + if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) && + (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { + amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, + 1 << vmid, GET_INST(GC, 0)); + return; + } - hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? - GC_HWIP : MMHUB_HWIP; + hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? 
GC_HWIP : MMHUB_HWIP; spin_lock(&adev->gmc.invalidate_lock); /* @@ -218,8 +247,7 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, if (use_semaphore) { for (i = 0; i < adev->usec_timeout; i++) { /* a read return value of 1 means semaphore acuqire */ - tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem + - hub->eng_distance * eng, hub_ip); + tmp = RREG32_RLC_NO_KIQ(sem, hub_ip); if (tmp & 0x1) break; udelay(1); @@ -229,12 +257,11 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); } - WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip); + WREG32_RLC_NO_KIQ(req, inv_req, hub_ip); /* Wait for ACK with a delay.*/ for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack + - hub->eng_distance * eng, hub_ip); + tmp = RREG32_RLC_NO_KIQ(ack, hub_ip); tmp &= 1 << vmid; if (tmp) break; @@ -244,12 +271,7 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ if (use_semaphore) - /* - * add semaphore release after invalidation, - * write with 0 means semaphore release - */ - WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem + - hub->eng_distance * eng, 0, hub_ip); + WREG32_RLC_NO_KIQ(sem, 0, hub_ip); /* Issue additional private vm invalidation to MMHUB */ if ((vmhub != AMDGPU_GFXHUB(0)) && @@ -266,50 +288,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, spin_unlock(&adev->gmc.invalidate_lock); - if (i < adev->usec_timeout) - return; - - DRM_ERROR("Timeout waiting for VM flush ACK!\n"); -} - -/** - * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback - * - * @adev: amdgpu_device pointer - * @vmid: vm instance to flush - * @vmhub: which hub to flush - * @flush_type: the flush type - * - * Flush the TLB for the requested page table. - */ -static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, - uint32_t vmhub, uint32_t flush_type) -{ - if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron) - return; - - /* flush hdp cache */ - adev->hdp.funcs->flush_hdp(adev, NULL); - - /* For SRIOV run time, driver shouldn't access the register through MMIO - * Directly use kiq to do the vm invalidation instead - */ - if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) && - (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { - struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; - const unsigned int eng = 17; - u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type); - u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng; - u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; - - amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, - 1 << vmid); - return; - } - - mutex_lock(&adev->mman.gtt_window_lock); - gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0); - mutex_unlock(&adev->mman.gtt_window_lock); + if (i >= adev->usec_timeout) + dev_err(adev->dev, "Timeout waiting for VM flush ACK!\n"); } /** @@ -323,59 +303,31 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * * Flush the TLB for the requested pasid. 
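Both flush paths bracket the invalidation with a hardware semaphore: poll until a read returns 1 (acquired), do the request/ack dance, then write 0 to release. A toy model of that protocol against a fake register file; the real accessors are RREG32_RLC_NO_KIQ/WREG32_RLC_NO_KIQ:

#include <stdbool.h>
#include <stdint.h>

static uint32_t toy_regs[64];	/* fake MMIO space for the sketch */
static uint32_t rd32(uint32_t reg)		{ return toy_regs[reg % 64]; }
static void wr32(uint32_t reg, uint32_t val)	{ toy_regs[reg % 64] = val; }

/* Acquire: poll until a read returns 1 (the real loop udelay(1)s
 * between polls and gives up after adev->usec_timeout tries). */
static bool vm_sem_acquire(uint32_t sem_reg, unsigned int timeout)
{
	unsigned int i;

	for (i = 0; i < timeout; i++)
		if (rd32(sem_reg) & 0x1)
			return true;
	return false;
}

/* Release: writing 0 hands the semaphore back. */
static void vm_sem_release(uint32_t sem_reg)
{
	wr32(sem_reg, 0);
}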
*/ -static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, - uint16_t pasid, uint32_t flush_type, - bool all_hub, uint32_t inst) +static void gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, + uint16_t pasid, uint32_t flush_type, + bool all_hub, uint32_t inst) { + uint16_t queried; int vmid, i; - signed long r; - uint32_t seq; - uint16_t queried_pasid; - bool ret; - struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; - - if (amdgpu_emu_mode == 0 && ring->sched.ready) { - spin_lock(&adev->gfx.kiq[0].ring_lock); - /* 2 dwords flush + 8 dwords fence */ - amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8); - kiq->pmf->kiq_invalidate_tlbs(ring, - pasid, flush_type, all_hub); - r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); - if (r) { - amdgpu_ring_undo(ring); - spin_unlock(&adev->gfx.kiq[0].ring_lock); - return -ETIME; - } - - amdgpu_ring_commit(ring); - spin_unlock(&adev->gfx.kiq[0].ring_lock); - r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); - if (r < 1) { - dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); - return -ETIME; - } - - return 0; - } for (vmid = 1; vmid < 16; vmid++) { - - ret = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid, - &queried_pasid); - if (ret && queried_pasid == pasid) { - if (all_hub) { - for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) - gmc_v11_0_flush_gpu_tlb(adev, vmid, - i, flush_type); - } else { - gmc_v11_0_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB(0), flush_type); - } + bool valid; + + valid = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid, + &queried); + if (!valid || queried != pasid) + continue; + + if (all_hub) { + for_each_set_bit(i, adev->vmhubs_mask, + AMDGPU_MAX_VMHUBS) + gmc_v11_0_flush_gpu_tlb(adev, vmid, i, + flush_type); + } else { + gmc_v11_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0), + flush_type); } } - - return 0; } static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, @@ -498,8 +450,7 @@ static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level, uint64_t *addr, uint64_t *flags) { if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM)) - *addr = adev->vm_manager.vram_base_offset + *addr - - adev->gmc.vram_start; + *addr = amdgpu_gmc_vram_mc2pa(adev, *addr); BUG_ON(*addr & 0xFFFF00000000003FULL); if (!adev->gmc.translate_further) @@ -542,6 +493,7 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev, } if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT | + AMDGPU_GEM_CREATE_EXT_COHERENT | AMDGPU_GEM_CREATE_UNCACHED)) *flags = (*flags & ~AMDGPU_PTE_MTYPE_NV10_MASK) | AMDGPU_PTE_MTYPE_NV10(MTYPE_UC); @@ -587,7 +539,7 @@ static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev) static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev) { - switch (adev->ip_versions[UMC_HWIP][0]) { + switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) { case IP_VERSION(8, 10, 0): adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM; adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM; @@ -610,13 +562,16 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev) static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev) { - switch (adev->ip_versions[MMHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { case IP_VERSION(3, 0, 1): adev->mmhub.funcs = &mmhub_v3_0_1_funcs; break; case IP_VERSION(3, 0, 2): adev->mmhub.funcs = &mmhub_v3_0_2_funcs; break; + case IP_VERSION(3, 3, 0): + adev->mmhub.funcs = &mmhub_v3_3_funcs; + break; default: adev->mmhub.funcs = 
&mmhub_v3_0_funcs; break; @@ -625,10 +580,13 @@ static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev) static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 3): adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs; break; + case IP_VERSION(11, 5, 0): + adev->gfxhub.funcs = &gfxhub_v11_5_0_funcs; + break; default: adev->gfxhub.funcs = &gfxhub_v3_0_funcs; break; @@ -679,9 +637,13 @@ static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev, base = adev->mmhub.funcs->get_fb_location(adev); + amdgpu_gmc_set_agp_default(adev, mc); amdgpu_gmc_vram_location(adev, &adev->gmc, base); - amdgpu_gmc_gart_location(adev, mc); - amdgpu_gmc_agp_location(adev, mc); + amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_HIGH); + if (!amdgpu_sriov_vf(adev) && + (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0)) && + (amdgpu_agp == 1)) + amdgpu_gmc_agp_location(adev, mc); /* base offset of vram pages */ if (amdgpu_sriov_vf(adev)) @@ -766,6 +728,8 @@ static int gmc_v11_0_sw_init(void *handle) adev->mmhub.funcs->init(adev); + adev->gfxhub.funcs->init(adev); + spin_lock_init(&adev->gmc.invalidate_lock); r = amdgpu_atomfirmware_get_vram_info(adev, @@ -775,12 +739,13 @@ static int gmc_v11_0_sw_init(void *handle) adev->gmc.vram_type = vram_type; adev->gmc.vram_vendor = vram_vendor; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 5, 0): set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); /* @@ -935,8 +900,10 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev) static int gmc_v11_0_hw_init(void *handle) { - int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode; /* The sequence of these two function calls matters.*/ gmc_v11_0_init_golden_registers(adev); @@ -974,6 +941,11 @@ static int gmc_v11_0_hw_fini(void *handle) } amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); + + if (adev->gmc.ecc_irq.funcs && + amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) + amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); + gmc_v11_0_gart_disable(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 5b837a65f..59d9215e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -211,8 +211,9 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, base <<= 24; + amdgpu_gmc_set_agp_default(adev, mc); amdgpu_gmc_vram_location(adev, mc, base); - amdgpu_gmc_gart_location(adev, mc); + amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT); } static void gmc_v6_0_mc_program(struct amdgpu_device *adev) @@ -253,8 +254,8 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, adev->mem_scratch.gpu_addr >> 12); WREG32(mmMC_VM_AGP_BASE, 0); - WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); - WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); + WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22); + WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22); if (gmc_v6_0_wait_for_idle((void *)adev)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); @@ -914,8 +915,8 @@ static int gmc_v6_0_hw_init(void *handle) if (amdgpu_emu_mode == 1) 
return amdgpu_gmc_vram_checking(adev); - else - return r; + + return 0; } static int gmc_v6_0_hw_fini(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 6a6929ac2..45a2f8e03 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -239,8 +239,9 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, base <<= 24; + amdgpu_gmc_set_agp_default(adev, mc); amdgpu_gmc_vram_location(adev, mc, base); - amdgpu_gmc_gart_location(adev, mc); + amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT); } /** @@ -288,8 +289,8 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, adev->mem_scratch.gpu_addr >> 12); WREG32(mmMC_VM_AGP_BASE, 0); - WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); - WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); + WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22); + WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22); if (gmc_v7_0_wait_for_idle((void *)adev)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); @@ -422,28 +423,23 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) * * Flush the TLB for the requested pasid. */ -static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, - uint16_t pasid, uint32_t flush_type, - bool all_hub, uint32_t inst) +static void gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, + uint16_t pasid, uint32_t flush_type, + bool all_hub, uint32_t inst) { + u32 mask = 0x0; int vmid; - unsigned int tmp; - - if (amdgpu_in_reset(adev)) - return -EIO; for (vmid = 1; vmid < 16; vmid++) { + u32 tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); - tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) && - (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) { - WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); - RREG32(mmVM_INVALIDATE_RESPONSE); - break; - } + (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) + mask |= 1 << vmid; } - return 0; + WREG32(mmVM_INVALIDATE_REQUEST, mask); + RREG32(mmVM_INVALIDATE_RESPONSE); } /* @@ -1103,8 +1099,8 @@ static int gmc_v7_0_hw_init(void *handle) if (amdgpu_emu_mode == 1) return amdgpu_gmc_vram_checking(adev); - else - return r; + + return 0; } static int gmc_v7_0_hw_fini(void *handle) @@ -1273,6 +1269,9 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, if (!addr && !status) return 0; + amdgpu_vm_update_fault_cache(adev, entry->pasid, + ((u64)addr) << AMDGPU_GPU_PAGE_SHIFT, status, AMDGPU_GFXHUB(0)); + if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST) gmc_v7_0_set_fault_enable_default(adev, false); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 5af235202..4422b27a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -413,8 +413,9 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; base <<= 24; + amdgpu_gmc_set_agp_default(adev, mc); amdgpu_gmc_vram_location(adev, mc, base); - amdgpu_gmc_gart_location(adev, mc); + amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT); } /** @@ -473,8 +474,8 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) } WREG32(mmMC_VM_AGP_BASE, 0); - WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); - WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); + WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22); + WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22); if (gmc_v8_0_wait_for_idle((void *)adev)) 
dev_warn(adev->dev, "Wait for MC idle timedout !\n"); @@ -612,29 +613,23 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) * * Flush the TLB for the requested pasid. */ -static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, - uint16_t pasid, uint32_t flush_type, - bool all_hub, uint32_t inst) +static void gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, + uint16_t pasid, uint32_t flush_type, + bool all_hub, uint32_t inst) { + u32 mask = 0x0; int vmid; - unsigned int tmp; - - if (amdgpu_in_reset(adev)) - return -EIO; for (vmid = 1; vmid < 16; vmid++) { + u32 tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); - tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) && - (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) { - WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); - RREG32(mmVM_INVALIDATE_RESPONSE); - break; - } + (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) + mask |= 1 << vmid; } - return 0; - + WREG32(mmVM_INVALIDATE_REQUEST, mask); + RREG32(mmVM_INVALIDATE_RESPONSE); } /* @@ -1224,8 +1219,8 @@ static int gmc_v8_0_hw_init(void *handle) if (amdgpu_emu_mode == 1) return amdgpu_gmc_vram_checking(adev); - else - return r; + + return 0; } static int gmc_v8_0_hw_fini(void *handle) @@ -1442,6 +1437,9 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, if (!addr && !status) return 0; + amdgpu_vm_update_fault_cache(adev, entry->pasid, + ((u64)addr) << AMDGPU_GPU_PAGE_SHIFT, status, AMDGPU_GFXHUB(0)); + if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST) gmc_v8_0_set_fault_enable_default(adev, false); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 89550d3df..ced2e1bdc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -56,6 +56,7 @@ #include "umc_v6_1.h" #include "umc_v6_0.h" #include "umc_v6_7.h" +#include "umc_v12_0.h" #include "hdp_v4_0.h" #include "mca_v3_0.h" @@ -64,8 +65,6 @@ #include "amdgpu_ras.h" #include "amdgpu_xgmi.h" -#include "amdgpu_reset.h" - /* add these here since we already include dce12 headers and these are for DCN */ #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2 @@ -554,6 +553,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_vmhub *hub; const char *mmhub_cid; const char *hub_name; + unsigned int vmhub; u64 addr; uint32_t cam_index = 0; int ret, xcc_id = 0; @@ -566,10 +566,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, if (entry->client_id == SOC15_IH_CLIENTID_VMC) { hub_name = "mmhub0"; - hub = &adev->vmhub[AMDGPU_MMHUB0(node_id / 4)]; + vmhub = AMDGPU_MMHUB0(node_id / 4); } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) { hub_name = "mmhub1"; - hub = &adev->vmhub[AMDGPU_MMHUB1(0)]; + vmhub = AMDGPU_MMHUB1(0); } else { hub_name = "gfxhub0"; if (adev->gfx.funcs->ih_node_to_logical_xcc) { @@ -578,8 +578,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, if (xcc_id < 0) xcc_id = 0; } - hub = &adev->vmhub[xcc_id]; + vmhub = xcc_id; } + hub = &adev->vmhub[vmhub]; if (retry_fault) { if (adev->irq.retry_cam_enabled) { @@ -625,7 +626,6 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, if (!printk_ratelimit()) return 0; - memset(&task_info, 0, sizeof(struct amdgpu_task_info)); amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); @@ -639,7 +639,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, 
addr, entry->client_id, soc15_ih_clientid_name[entry->client_id]); - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n", node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4, node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : ""); @@ -653,7 +653,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, * the new fast GRBM interface. */ if ((entry->vmid_src == AMDGPU_GFXHUB(0)) && - (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) + (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))) RREG32(hub->vm_l2_pro_fault_status); status = RREG32(hub->vm_l2_pro_fault_status); @@ -661,6 +661,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW); WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); + amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub); + dev_err(adev->dev, "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", status); @@ -670,7 +672,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, gfxhub_client_ids[cid], cid); } else { - switch (adev->ip_versions[MMHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { case IP_VERSION(9, 0, 0): mmhub_cid = mmhub_client_ids_vega10[cid][rw]; break; @@ -735,7 +737,8 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs; if (!amdgpu_sriov_vf(adev) && - !adev->gmc.xgmi.connected_to_cpu) { + !adev->gmc.xgmi.connected_to_cpu && + !adev->gmc.is_app_apu) { adev->gmc.ecc_irq.num_types = 1; adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs; } @@ -770,8 +773,8 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, uint32_t vmhub) { - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) || + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) return false; return ((vmhub == AMDGPU_MMHUB0(0) || @@ -814,45 +817,32 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, uint32_t vmhub, uint32_t flush_type) { bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); + u32 j, inv_req, tmp, sem, req, ack, inst; const unsigned int eng = 17; - u32 j, inv_req, inv_req2, tmp; struct amdgpu_vmhub *hub; BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS); hub = &adev->vmhub[vmhub]; - if (adev->gmc.xgmi.num_physical_nodes && - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) { - /* Vega20+XGMI caches PTEs in TC and TLB. Add a - * heavy-weight TLB flush (type 2), which flushes - * both. Due to a race condition with concurrent - * memory accesses using the same TLB cache line, we - * still need a second TLB flush after this. 
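The branch deleted just below implemented the Vega20+XGMI workaround by issuing two invalidations back to back. Reduced to its control flow, with issue_flush() as a printf stub standing in for the whole semaphore/request/ack sequence:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stub for the full register sequence; invented for the sketch. */
static void issue_flush(uint32_t vmid, uint32_t flush_type)
{
	printf("invalidate vmid %u, flush type %u\n", vmid, flush_type);
}

/* On Vega20+XGMI a heavy-weight type-2 pass (TC and TLB) ran first,
 * then the caller's requested type, to close the TLB cache-line race
 * the comment above describes. */
static void flush_with_xgmi_wa(uint32_t vmid, uint32_t flush_type,
			       bool vega20_xgmi_wa)
{
	if (vega20_xgmi_wa)
		issue_flush(vmid, 2);
	issue_flush(vmid, flush_type);
}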
- */ - inv_req = gmc_v9_0_get_invalidate_req(vmid, 2); - inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type); - } else if (flush_type == 2 && - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) && - adev->rev_id == 0) { - inv_req = gmc_v9_0_get_invalidate_req(vmid, 0); - inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type); - } else { - inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type); - inv_req2 = 0; - } + inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type); + sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng; + req = hub->vm_inv_eng0_req + hub->eng_distance * eng; + ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; /* This is necessary for a HW workaround under SRIOV as well * as GFXOFF under bare metal */ - if (adev->gfx.kiq[0].ring.sched.ready && - (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && - down_read_trylock(&adev->reset_domain->sem)) { + if (vmhub >= AMDGPU_MMHUB0(0)) + inst = GET_INST(GC, 0); + else + inst = vmhub; + if (adev->gfx.kiq[inst].ring.sched.ready && + (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng; uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, - 1 << vmid); - up_read(&adev->reset_domain->sem); + 1 << vmid, inst); return; } @@ -870,9 +860,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, for (j = 0; j < adev->usec_timeout; j++) { /* a read return value of 1 means semaphore acquire */ if (vmhub >= AMDGPU_MMHUB0(0)) - tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng); + tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, sem, inst); else - tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng); + tmp = RREG32_SOC15_IP_NO_KIQ(GC, sem, inst); if (tmp & 0x1) break; udelay(1); @@ -882,35 +872,29 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); } - do { - if (vmhub >= AMDGPU_MMHUB0(0)) - WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); - else - WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); - - /* - * Issue a dummy read to wait for the ACK register to - * be cleared to avoid a false ACK due to the new fast - * GRBM interface. - */ - if ((vmhub == AMDGPU_GFXHUB(0)) && - (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) - RREG32_NO_KIQ(hub->vm_inv_eng0_req + - hub->eng_distance * eng); + if (vmhub >= AMDGPU_MMHUB0(0)) + WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req, inst); + else + WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req, inst); - for (j = 0; j < adev->usec_timeout; j++) { - if (vmhub >= AMDGPU_MMHUB0(0)) - tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_ack + hub->eng_distance * eng); - else - tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng); - if (tmp & (1 << vmid)) - break; - udelay(1); - } + /* + * Issue a dummy read to wait for the ACK register to + * be cleared to avoid a false ACK due to the new fast + * GRBM interface. 
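The dummy-read workaround kept in the hunk that follows reads the request register straight back after writing it, so the ACK poll cannot see a stale value through the fast GRBM path. Sketched against a toy register file:

#include <stdint.h>

static uint32_t grbm_regs[64];	/* fake MMIO space for the sketch */
static uint32_t reg_rd(uint32_t r)		{ return grbm_regs[r % 64]; }
static void reg_wr(uint32_t r, uint32_t v)	{ grbm_regs[r % 64] = v; }

/* Write the invalidate request, issue the dummy read when the
 * workaround applies (GFXHUB on GC < 9.4.2), then poll the ACK bit
 * for this vmid. Returns 0 on ACK, -1 on timeout. */
static int request_and_wait_ack(uint32_t req_reg, uint32_t ack_reg,
				uint32_t inv_req, uint32_t vmid,
				unsigned int timeout, int dummy_read)
{
	unsigned int i;

	reg_wr(req_reg, inv_req);
	if (dummy_read)
		(void)reg_rd(req_reg);	/* settle the fast GRBM path */

	for (i = 0; i < timeout; i++)
		if (reg_rd(ack_reg) & (1u << vmid))
			return 0;
	return -1;
}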
+ */ + if ((vmhub == AMDGPU_GFXHUB(0)) && + (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) + RREG32_NO_KIQ(req); - inv_req = inv_req2; - inv_req2 = 0; - } while (inv_req); + for (j = 0; j < adev->usec_timeout; j++) { + if (vmhub >= AMDGPU_MMHUB0(0)) + tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack, inst); + else + tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack, inst); + if (tmp & (1 << vmid)) + break; + udelay(1); + } /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ if (use_semaphore) { @@ -919,9 +903,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * write with 0 means semaphore release */ if (vmhub >= AMDGPU_MMHUB0(0)) - WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0); + WREG32_SOC15_IP_NO_KIQ(MMHUB, sem, 0, inst); else - WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0); + WREG32_SOC15_IP_NO_KIQ(GC, sem, 0, inst); } spin_unlock(&adev->gmc.invalidate_lock); @@ -943,91 +927,32 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * * Flush the TLB for the requested pasid. */ -static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, - uint16_t pasid, uint32_t flush_type, - bool all_hub, uint32_t inst) +static void gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, + uint16_t pasid, uint32_t flush_type, + bool all_hub, uint32_t inst) { - int vmid, i; - signed long r; - uint32_t seq; - uint16_t queried_pasid; - bool ret; - u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout; - struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst]; - - if (amdgpu_in_reset(adev)) - return -EIO; - - if (ring->sched.ready && down_read_trylock(&adev->reset_domain->sem)) { - /* Vega20+XGMI caches PTEs in TC and TLB. Add a - * heavy-weight TLB flush (type 2), which flushes - * both. Due to a race condition with concurrent - * memory accesses using the same TLB cache line, we - * still need a second TLB flush after this. 
@@ -943,91 +927,32 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 *
 * Flush the TLB for the requested pasid.
 */
-static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
-					uint16_t pasid, uint32_t flush_type,
-					bool all_hub, uint32_t inst)
+static void gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+					 uint16_t pasid, uint32_t flush_type,
+					 bool all_hub, uint32_t inst)
 {
-	int vmid, i;
-	signed long r;
-	uint32_t seq;
-	uint16_t queried_pasid;
-	bool ret;
-	u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
-	struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
-	struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
-
-	if (amdgpu_in_reset(adev))
-		return -EIO;
-
-	if (ring->sched.ready && down_read_trylock(&adev->reset_domain->sem)) {
-		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
-		 * heavy-weight TLB flush (type 2), which flushes
-		 * both. Due to a race condition with concurrent
-		 * memory accesses using the same TLB cache line, we
-		 * still need a second TLB flush after this.
-		 */
-		bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
-				       adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0));
-		/* 2 dwords flush + 8 dwords fence */
-		unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;
-
-		if (vega20_xgmi_wa)
-			ndw += kiq->pmf->invalidate_tlbs_size;
-
-		spin_lock(&adev->gfx.kiq[inst].ring_lock);
-		/* 2 dwords flush + 8 dwords fence */
-		amdgpu_ring_alloc(ring, ndw);
-		if (vega20_xgmi_wa)
-			kiq->pmf->kiq_invalidate_tlbs(ring,
-						      pasid, 2, all_hub);
-
-		if (flush_type == 2 &&
-		    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) &&
-		    adev->rev_id == 0)
-			kiq->pmf->kiq_invalidate_tlbs(ring,
-						      pasid, 0, all_hub);
-
-		kiq->pmf->kiq_invalidate_tlbs(ring,
-					      pasid, flush_type, all_hub);
-		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
-		if (r) {
-			amdgpu_ring_undo(ring);
-			spin_unlock(&adev->gfx.kiq[inst].ring_lock);
-			up_read(&adev->reset_domain->sem);
-			return -ETIME;
-		}
-
-		amdgpu_ring_commit(ring);
-		spin_unlock(&adev->gfx.kiq[inst].ring_lock);
-		r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
-		if (r < 1) {
-			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
-			up_read(&adev->reset_domain->sem);
-			return -ETIME;
-		}
-		up_read(&adev->reset_domain->sem);
-		return 0;
-	}
+	uint16_t queried;
+	int i, vmid;
 	for (vmid = 1; vmid < 16; vmid++) {
+		bool valid;
-		ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
-							       &queried_pasid);
-		if (ret && queried_pasid == pasid) {
-			if (all_hub) {
-				for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
-					gmc_v9_0_flush_gpu_tlb(adev, vmid,
-							i, flush_type);
-			} else {
-				gmc_v9_0_flush_gpu_tlb(adev, vmid,
-						AMDGPU_GFXHUB(0), flush_type);
-			}
-			break;
+		valid = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
+								 &queried);
+		if (!valid || queried != pasid)
+			continue;
+
+		if (all_hub) {
+			for_each_set_bit(i, adev->vmhubs_mask,
+					 AMDGPU_MAX_VMHUBS)
+				gmc_v9_0_flush_gpu_tlb(adev, vmid, i,
+						       flush_type);
+		} else {
+			gmc_v9_0_flush_gpu_tlb(adev, vmid,
+					       AMDGPU_GFXHUB(0),
+					       flush_type);
 		}
 	}
-
-	return 0;
-
 }
 static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
@@ -1183,14 +1108,15 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
 {
 	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
-	bool coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
+	bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT | AMDGPU_GEM_CREATE_EXT_COHERENT);
+	bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
 	bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
 	struct amdgpu_vm *vm = mapping->bo_va->base.vm;
 	unsigned int mtype_local, mtype;
 	bool snoop = false;
 	bool is_local;
-	switch (adev->ip_versions[GC_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 	case IP_VERSION(9, 4, 1):
 	case IP_VERSION(9, 4, 2):
 		if (is_vram) {
@@ -1204,8 +1130,10 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
 			/* FIXME: is this still needed? Or does
 			 * amdgpu_ttm_tt_pde_flags already handle this?
 			 */
-			if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
-			     adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) &&
+			if ((amdgpu_ip_version(adev, GC_HWIP, 0) ==
+				     IP_VERSION(9, 4, 2) ||
+			     amdgpu_ip_version(adev, GC_HWIP, 0) ==
+				     IP_VERSION(9, 4, 3)) &&
 			    adev->gmc.xgmi.connected_to_cpu)
 				snoop = true;
 		} else {
@@ -1251,6 +1179,11 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
 		snoop = true;
 		if (uncached) {
 			mtype = MTYPE_UC;
+		} else if (ext_coherent) {
+			if (adev->rev_id)
+				mtype = is_local ? MTYPE_CC : MTYPE_UC;
+			else
+				mtype = MTYPE_UC;
 		} else if (adev->flags & AMD_IS_APU) {
 			mtype = is_local ? mtype_local : MTYPE_NC;
 		} else {
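A recurring conversion throughout this patch replaces direct adev->ip_versions[HWIP][0] reads with the amdgpu_ip_version() wrapper. At this point in the tree it is essentially a thin accessor (a sketch; the authoritative definition lives in amdgpu.h), introduced so the encoding of the stored version can change later without touching every call site:

    /* sketch of the accessor assumed by these conversions */
    static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
                                             uint8_t ip, uint8_t inst)
    {
            return adev->ip_versions[ip][inst];
    }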
@@ -1314,23 +1247,26 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
 	/* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system
 	 * memory can use more efficient MTYPEs.
 	 */
-	if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3))
 		return;
 	/* Only direct-mapped memory allows us to determine the NUMA node from
 	 * the DMA address.
 	 */
 	if (!adev->ram_is_direct_mapped) {
-		dev_dbg(adev->dev, "RAM is not direct mapped\n");
+		dev_dbg_ratelimited(adev->dev, "RAM is not direct mapped\n");
 		return;
 	}
-	/* Only override mappings with MTYPE_NC, which is the safe default for
-	 * cacheable memory.
+	/* MTYPE_NC is the same default and can be overridden.
+	 * MTYPE_UC will be present if the memory is extended-coherent
+	 * and can also be overridden.
 	 */
 	if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
-	    AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)) {
-		dev_dbg(adev->dev, "MTYPE is not NC\n");
+		    AMDGPU_PTE_MTYPE_VG10(MTYPE_NC) &&
+	    (*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
+		    AMDGPU_PTE_MTYPE_VG10(MTYPE_UC)) {
+		dev_dbg_ratelimited(adev->dev, "MTYPE is not NC or UC\n");
 		return;
 	}
@@ -1341,7 +1277,7 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
 	if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
 		local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
 	} else {
-		dev_dbg(adev->dev, "Only native mode APU is supported.\n");
+		dev_dbg_ratelimited(adev->dev, "Only native mode APU is supported.\n");
 		return;
 	}
@@ -1349,25 +1285,33 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
 	 * page or NUMA nodes.
 	 */
 	if (!page_is_ram(addr >> PAGE_SHIFT)) {
-		dev_dbg(adev->dev, "Page is not RAM.\n");
+		dev_dbg_ratelimited(adev->dev, "Page is not RAM.\n");
 		return;
 	}
 	nid = pfn_to_nid(addr >> PAGE_SHIFT);
-	dev_dbg(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n",
-		vm->mem_id, local_node, nid);
+	dev_dbg_ratelimited(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n",
+			    vm->mem_id, local_node, nid);
 	if (nid == local_node) {
 		uint64_t old_flags = *flags;
-		unsigned int mtype_local = MTYPE_RW;
-
-		if (amdgpu_mtype_local == 1)
-			mtype_local = MTYPE_NC;
-		else if (amdgpu_mtype_local == 2)
-			mtype_local = MTYPE_CC;
+		if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) ==
+		    AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)) {
+			unsigned int mtype_local = MTYPE_RW;
+
+			if (amdgpu_mtype_local == 1)
+				mtype_local = MTYPE_NC;
+			else if (amdgpu_mtype_local == 2)
+				mtype_local = MTYPE_CC;
+
+			*flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
+				 AMDGPU_PTE_MTYPE_VG10(mtype_local);
+		} else if (adev->rev_id) {
+			/* MTYPE_UC case */
+			*flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
+				 AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
+		}
-		*flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
-			 AMDGPU_PTE_MTYPE_VG10(mtype_local);
-		dev_dbg(adev->dev, "flags updated from %llx to %llx\n",
-			old_flags, *flags);
+		dev_dbg_ratelimited(adev->dev, "flags updated from %llx to %llx\n",
+				    old_flags, *flags);
 	}
 }
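The override above always follows the same two-step pattern: clear the old MTYPE field out of the PTE flags word, then OR in the new encoding. Condensed into a hypothetical helper (the name is illustrative, not from the driver):

    /* apply a new MTYPE to a VG10-format PTE flags word */
    static u64 vg10_set_mtype(u64 pte_flags, unsigned int mtype)
    {
            pte_flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;       /* drop old MTYPE   */
            return pte_flags | AMDGPU_PTE_MTYPE_VG10(mtype); /* install new one */
    }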
@@ -1383,7 +1327,7 @@ static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
 	} else {
 		u32 viewport;
-		switch (adev->ip_versions[DCE_HWIP][0]) {
+		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 		case IP_VERSION(1, 0, 0):
 		case IP_VERSION(1, 0, 1):
 			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
@@ -1454,7 +1398,7 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
 {
-	switch (adev->ip_versions[UMC_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
 	case IP_VERSION(6, 0, 0):
 		adev->umc.funcs = &umc_v6_0_funcs;
 		break;
@@ -1490,6 +1434,19 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
 		else
 			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
 		break;
+	case IP_VERSION(12, 0, 0):
+		adev->umc.max_ras_err_cnt_per_query =
+			UMC_V12_0_TOTAL_CHANNEL_NUM(adev) * UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
+		adev->umc.channel_inst_num = UMC_V12_0_CHANNEL_INSTANCE_NUM;
+		adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM;
+		adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM;
+		adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
+		adev->umc.active_mask = adev->aid_mask;
+		adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
+		adev->umc.channel_idx_tbl = &umc_v12_0_channel_idx_tbl[0][0][0];
+		if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
+			adev->umc.ras = &umc_v12_0_ras;
+		break;
 	default:
 		break;
 	}
@@ -1497,7 +1454,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
 {
-	switch (adev->ip_versions[MMHUB_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
 	case IP_VERSION(9, 4, 1):
 		adev->mmhub.funcs = &mmhub_v9_4_funcs;
 		break;
@@ -1515,7 +1472,7 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
 static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
 {
-	switch (adev->ip_versions[MMHUB_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
 	case IP_VERSION(9, 4, 0):
 		adev->mmhub.ras = &mmhub_v1_0_ras;
 		break;
@@ -1536,7 +1493,7 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
 static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
 {
-	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
 		adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
 	else
 		adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
@@ -1552,7 +1509,7 @@ static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev)
 	struct amdgpu_mca *mca = &adev->mca;
 	/* is UMC the right IP to check for MCA? Maybe DF? */
-	switch (adev->ip_versions[UMC_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
 	case IP_VERSION(6, 7, 0):
 		if (!adev->gmc.xgmi.connected_to_cpu) {
 			mca->mp0.ras = &mca_v3_0_mp0_ras;
@@ -1579,18 +1536,18 @@ static int gmc_v9_0_early_init(void *handle)
 	 * 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined
 	 * in their IP discovery tables
 	 */
-	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0) ||
-	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
-	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||
+	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
+	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
 		adev->gmc.xgmi.supported = true;
-	if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {
+	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {
 		adev->gmc.xgmi.supported = true;
 		adev->gmc.xgmi.connected_to_cpu =
 			adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
 	}
-	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
 		enum amdgpu_pkg_type pkg_type =
 			adev->smuio.funcs->get_pkg_type(adev);
 		/* On GFXIP 9.4.3. APU, there is no physical VRAM domain present
@@ -1639,7 +1596,7 @@ static int gmc_v9_0_late_init(void *handle)
 	 * writes, while disables HBM ECC for vega10.
 	 */
 	if (!amdgpu_sriov_vf(adev) &&
-	    (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) {
+	    (amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(6, 0, 0))) {
 		if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
 			if (adev->df.funcs &&
 			    adev->df.funcs->enable_ecc_force_par_wr_rmw)
@@ -1648,13 +1605,8 @@ static int gmc_v9_0_late_init(void *handle)
 	}
 	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
-		if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
-		    adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
-			adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
-
-		if (adev->hdp.ras && adev->hdp.ras->ras_block.hw_ops &&
-		    adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count)
-			adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count(adev);
+		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
+		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__HDP);
 	}
 	r = amdgpu_gmc_ras_late_init(adev);
@@ -1669,14 +1621,17 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
 {
 	u64 base = adev->mmhub.funcs->get_fb_location(adev);
+	amdgpu_gmc_set_agp_default(adev, mc);
+
 	/* add the xgmi offset of the physical node */
 	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
 	if (adev->gmc.xgmi.connected_to_cpu) {
 		amdgpu_gmc_sysvm_location(adev, mc);
 	} else {
 		amdgpu_gmc_vram_location(adev, mc, base);
-		amdgpu_gmc_gart_location(adev, mc);
-		amdgpu_gmc_agp_location(adev, mc);
+		amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
+		if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
+			amdgpu_gmc_agp_location(adev, mc);
 	}
 	/* base offset of vram pages */
 	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
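Two related changes in the hunk above: amdgpu_gmc_set_agp_default() first marks the AGP aperture empty, and the aperture is only placed afterwards when the amdgpu.agp=1 module parameter explicitly asks for it on bare metal. A sketch of what the default helper is understood to do (the authoritative version is in amdgpu_gmc.c; the field values here are an assumption):

    /* sketch: an aperture whose start is past its end is disabled */
    void amdgpu_gmc_set_agp_default(struct amdgpu_device *adev,
                                    struct amdgpu_gmc *mc)
    {
            mc->agp_start = 0xffffffffffff;
            mc->agp_end = 0;
            mc->agp_size = 0;
    }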
@@ -1747,7 +1702,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 	/* set the gart size */
 	if (amdgpu_gart_size == -1) {
-		switch (adev->ip_versions[GC_HWIP][0]) {
+		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 		case IP_VERSION(9, 0, 1):  /* all engines support GPUVM */
 		case IP_VERSION(9, 2, 1):  /* all engines support GPUVM */
 		case IP_VERSION(9, 4, 0):
@@ -1826,8 +1781,8 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
 */
 static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
 {
-	if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
-	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1)))
+	if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
+	    (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1)))
 		adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
 }
@@ -1879,15 +1834,14 @@ static void
 gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev,
 			      struct amdgpu_mem_partition_info *mem_ranges)
 {
-	int num_ranges = 0, ret, mem_groups;
 	struct amdgpu_numa_info numa_info;
 	int node_ids[MAX_MEM_RANGES];
+	int num_ranges = 0, ret;
 	int num_xcc, xcc_id;
 	uint32_t xcc_mask;
 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
 	xcc_mask = (1U << num_xcc) - 1;
-	mem_groups = hweight32(adev->aid_mask);
 	for_each_inst(xcc_id, xcc_mask)	{
 		ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
@@ -1912,12 +1866,6 @@ gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev,
 	}
 	adev->gmc.num_mem_partitions = num_ranges;
-
-	/* If there is only partition, don't use entire size */
-	if (adev->gmc.num_mem_partitions == 1) {
-		mem_ranges[0].size = mem_ranges[0].size * (mem_groups - 1);
-		do_div(mem_ranges[0].size, mem_groups);
-	}
 }
 static void
@@ -1972,10 +1920,9 @@ static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
 {
 	bool valid;
-	adev->gmc.mem_partitions = kzalloc(
-		MAX_MEM_RANGES * sizeof(struct amdgpu_mem_partition_info),
-		GFP_KERNEL);
-
+	adev->gmc.mem_partitions = kcalloc(MAX_MEM_RANGES,
+					   sizeof(struct amdgpu_mem_partition_info),
+					   GFP_KERNEL);
 	if (!adev->gmc.mem_partitions)
 		return -ENOMEM;
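The allocation switch just above is not purely cosmetic: kcalloc() checks the n * size multiplication for overflow before allocating, whereas the open-coded multiply inside the old kzalloc() call would silently wrap. Equivalent spelling, for illustration:

    /* kcalloc(n, size, flags) behaves like this overflow-checked form */
    adev->gmc.mem_partitions =
            kmalloc_array(MAX_MEM_RANGES,
                          sizeof(struct amdgpu_mem_partition_info),
                          GFP_KERNEL | __GFP_ZERO);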
@@ -2000,13 +1947,6 @@ static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
 static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
 {
-	static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
-	u32 vram_info;
-
-	if (!amdgpu_sriov_vf(adev)) {
-		vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
-		adev->gmc.vram_vendor = vram_info & 0xF;
-	}
 	adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
 	adev->gmc.vram_width = 128 * 64;
 }
@@ -2023,7 +1963,7 @@ static int gmc_v9_0_sw_init(void *handle)
 	spin_lock_init(&adev->gmc.invalidate_lock);
-	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
 		gmc_v9_4_3_init_vram_info(adev);
 	} else if (!adev->bios) {
 		if (adev->flags & AMD_IS_APU) {
@@ -2063,7 +2003,7 @@ static int gmc_v9_0_sw_init(void *handle)
 		adev->gmc.vram_type = vram_type;
 		adev->gmc.vram_vendor = vram_vendor;
 	}
-	switch (adev->ip_versions[GC_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 	case IP_VERSION(9, 1, 0):
 	case IP_VERSION(9, 2, 2):
 		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
@@ -2091,12 +2031,9 @@ static int gmc_v9_0_sw_init(void *handle)
 		 * vm size is 256TB (48bit), maximum size of Vega10,
 		 * block size 512 (9bit)
 		 */
-		/* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
-		if (amdgpu_sriov_vf(adev))
-			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
-		else
-			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
-		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+
+		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
 			adev->gmc.translate_further = adev->vm_manager.num_level > 1;
 		break;
 	case IP_VERSION(9, 4, 1):
@@ -2128,7 +2065,7 @@ static int gmc_v9_0_sw_init(void *handle)
 	if (r)
 		return r;
-	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) {
 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1,
 				      VMC_1_0__SRCID__VM_FAULT,
 				      &adev->gmc.vm_fault);
 		if (r)
@@ -2142,7 +2079,8 @@ static int gmc_v9_0_sw_init(void *handle)
 		return r;
 	if (!amdgpu_sriov_vf(adev) &&
-	    !adev->gmc.xgmi.connected_to_cpu) {
+	    !adev->gmc.xgmi.connected_to_cpu &&
+	    !adev->gmc.is_app_apu) {
 		/* interrupt sent to DF. */
 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
 				      &adev->gmc.ecc_irq);
@@ -2156,7 +2094,10 @@ static int gmc_v9_0_sw_init(void *handle)
 	 */
 	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
-	dma_addr_bits = adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) ? 48:44;
+	dma_addr_bits = amdgpu_ip_version(adev, GC_HWIP, 0) >=
+				IP_VERSION(9, 4, 2) ?
+			48 :
+			44;
 	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));
 	if (r) {
 		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
@@ -2170,7 +2111,7 @@ static int gmc_v9_0_sw_init(void *handle)
 	amdgpu_gmc_get_vbios_allocations(adev);
-	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
 		r = gmc_v9_0_init_mem_ranges(adev);
 		if (r)
 			return r;
@@ -2196,9 +2137,11 @@ static int gmc_v9_0_sw_init(void *handle)
 	 * for video processing.
 	 */
 	adev->vm_manager.first_kfd_vmid =
-		(adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
-		 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
-		 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) ? 3 : 8;
+		(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
+		 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
+		 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) ?
+			3 :
+			8;
 	amdgpu_vm_manager_init(adev);
@@ -2208,7 +2151,7 @@ static int gmc_v9_0_sw_init(void *handle)
 	if (r)
 		return r;
-	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
 		amdgpu_gmc_sysfs_init(adev);
 	return 0;
@@ -2218,7 +2161,7 @@ static int gmc_v9_0_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
 		amdgpu_gmc_sysfs_fini(adev);
 	amdgpu_gmc_ras_fini(adev);
@@ -2241,8 +2184,7 @@ static int gmc_v9_0_sw_fini(void *handle)
 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
 {
-
-	switch (adev->ip_versions[MMHUB_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
 	case IP_VERSION(9, 0, 0):
 		if (amdgpu_sriov_vf(adev))
 			break;
@@ -2276,8 +2218,8 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
 */
 void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
 {
-	if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
-	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) {
+	if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
+	    (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) {
 		WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
 		WARN_ON(adev->gmc.sdpif_register !=
 			RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
@@ -2330,6 +2272,24 @@ static int gmc_v9_0_hw_init(void *handle)
 	bool value;
 	int i, r;
+	adev->gmc.flush_pasid_uses_kiq = true;
+
+	/* Vega20+XGMI caches PTEs in TC and TLB. Add a heavy-weight TLB flush
+	 * (type 2), which flushes both. Due to a race condition with
+	 * concurrent memory accesses using the same TLB cache line, we still
+	 * need a second TLB flush after this.
+	 */
+	adev->gmc.flush_tlb_needs_extra_type_2 =
+		amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) &&
+		adev->gmc.xgmi.num_physical_nodes;
+	/*
+	 * TODO: This workaround is badly documented and had a buggy
+	 * implementation. We should probably verify what we do here.
+	 */
+	adev->gmc.flush_tlb_needs_extra_type_0 =
+		amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
+		adev->rev_id == 0;
+
 	/* The sequence of these two function calls matters.*/
 	gmc_v9_0_init_golden_registers(adev);
@@ -2373,8 +2333,8 @@ static int gmc_v9_0_hw_init(void *handle)
 	if (amdgpu_emu_mode == 1)
 		return amdgpu_gmc_vram_checking(adev);
-	else
-		return r;
+
+	return 0;
 }
 /**
@@ -2413,6 +2373,10 @@ static int gmc_v9_0_hw_fini(void *handle)
 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+	if (adev->gmc.ecc_irq.funcs &&
+	    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+
 	return 0;
 }
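The new amdgpu_irq_put() in gmc_v9_0_hw_fini() balances a reference that is taken when UMC RAS is enabled (presumably during RAS/ECC late init; the guard on ecc_irq.funcs avoids dropping a reference on an irq source that was never wired up). The pattern being balanced, sketched under that assumption:

    /* take side, elsewhere during init (sketch) */
    r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
    /* put side, this hunk, on hw_fini */
    amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);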
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index 71d1a2e3b..4db6bb73e 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -49,8 +49,8 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
 static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
 				    struct amdgpu_ring *ring)
 {
-	if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0) ||
-	    adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 2))
+	if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 0) ||
+	    amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 2))
 		return;
 	if (!ring || !ring->funcs->emit_wreg)
@@ -80,7 +80,7 @@ static void hdp_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
 		return;
-	if (adev->ip_versions[HDP_HWIP][0] >= IP_VERSION(4, 4, 0))
+	if (amdgpu_ip_version(adev, HDP_HWIP, 0) >= IP_VERSION(4, 4, 0))
 		WREG32_SOC15(HDP, 0, mmHDP_EDC_CNT, 0);
 	else
 		/*read back hdp ras counter to reset it to 0 */
@@ -92,10 +92,10 @@ static void hdp_v4_0_update_clock_gating(struct amdgpu_device *adev,
 {
 	uint32_t def, data;
-	if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 0, 0) ||
-	    adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 0, 1) ||
-	    adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 1, 1) ||
-	    adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 1, 0)) {
+	if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 0, 0) ||
+	    amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 0, 1) ||
+	    amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 1, 1) ||
+	    amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 1, 0)) {
 		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
 		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
@@ -129,6 +129,11 @@ static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev,
 {
 	int data;
+	if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 2)) {
+		/* Default enabled */
+		*flags |= AMD_CG_SUPPORT_HDP_MGCG;
+		return;
+	}
 	/* AMD_CG_SUPPORT_HDP_LS */
 	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
 	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
@@ -137,7 +142,7 @@ static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev,
 static void hdp_v4_0_init_registers(struct amdgpu_device *adev)
 {
-	switch (adev->ip_versions[HDP_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
 	case IP_VERSION(4, 2, 1):
 		WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
 		break;
@@ -145,9 +150,13 @@ static void hdp_v4_0_init_registers(struct amdgpu_device *adev)
 		break;
 	}
+	/* Do not program registers if VF */
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
-	if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0))
+	if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 0))
 		WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, READ_BUFFER_WATERMARK, 2);
 	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
index 063eba619..ab06c2b4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
@@ -28,6 +28,9 @@
 #include "hdp/hdp_6_0_0_sh_mask.h"
 #include
+#define regHDP_CLK_CNTL_V6_1	0xd5
+#define regHDP_CLK_CNTL_V6_1_BASE_IDX	0
+
 static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
 			       struct amdgpu_ring *ring)
 {
@@ -40,7 +43,7 @@ static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
 static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,
 					 bool enable)
 {
-	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
+	uint32_t hdp_clk_cntl;
 	uint32_t hdp_mem_pwr_cntl;
 	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
@@ -48,14 +51,20 @@ static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,
 				AMD_CG_SUPPORT_HDP_SD)))
 		return;
-	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0,regHDP_CLK_CNTL);
+	if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(6, 1, 0))
+		hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL_V6_1);
+	else
+		hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
 	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
 	/* Before doing clock/power mode switch,
 	 * forced on IPH & RC clock */
 	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
 				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
-	WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+	if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(6, 1, 0))
+		WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL_V6_1, hdp_clk_cntl);
+	else
+		WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
 	/* disable clock and power gating before any changing */
 	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
@@ -117,7 +126,10 @@ static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,
 	/* disable IPH & RC clock override after clock/power mode changing */
 	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
 				     RC_MEM_CLK_SOFT_OVERRIDE, 0);
-	WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+	if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(6, 1, 0))
+		WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL_V6_1, hdp_clk_cntl);
+	else
+		WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
 }
 static void hdp_v6_0_get_clockgating_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index aecad530b..2c02ae698 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -215,6 +215,11 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
 	WREG32(mmIH_RB_CNTL, tmp);
+	/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+	 * can be detected.
+	 */
+	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+	WREG32(mmIH_RB_CNTL, tmp);
 out:
 	return (wptr & ih->ptr_mask);
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index ec0c8f8b4..ad4ad39f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -418,6 +418,12 @@ static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev,
 	tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
 	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+	/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+	 * can be detected.
+	 */
+	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
 out:
 	return (wptr & ih->ptr_mask);
 }
@@ -641,8 +647,6 @@ static void ih_v6_0_update_clockgating_state(struct amdgpu_device *adev,
 		if (def != data)
 			WREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL, data);
 	}
-
-	return;
 }
 static int ih_v6_0_set_clockgating_state(void *handle,
@@ -726,8 +730,6 @@ static void ih_v6_0_get_clockgating_state(void *handle, u64 *flags)
 	if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL))
 		*flags |= AMD_CG_SUPPORT_IH_CG;
-
-	return;
 }
 static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
index 8fb05eae3..b8da0fc29 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
@@ -418,6 +418,13 @@ static u32 ih_v6_1_get_wptr(struct amdgpu_device *adev,
 	tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
 	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+	/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+	 * can be detected.
+	 */
+	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
 out:
 	return (wptr & ih->ptr_mask);
 }
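The same two-line fix lands in iceland_ih.c, ih_v6_0.c and ih_v6_1.c above: WPTR_OVERFLOW_CLEAR acts as a strobe, so leaving it set keeps the overflow status permanently cleared and later ring overflows would go unnoticed. The bit has to be raised and then lowered again, in isolation:

    tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
    WREG32(mmIH_RB_CNTL, tmp);          /* clear the latched overflow */
    tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
    WREG32(mmIH_RB_CNTL, tmp);          /* re-arm overflow detection  */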
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index 4ab90c785..c0bdab3bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -36,10 +36,11 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_4_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_0_imu.bin");
 static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
 {
-	char fw_name[40];
+	char fw_name[45];
 	char ucode_prefix[30];
 	int err;
 	const struct imu_firmware_header_v1_0 *imu_hdr;
@@ -54,7 +55,6 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
 	if (err)
 		goto out;
 	imu_hdr = (const struct imu_firmware_header_v1_0 *)adev->gfx.imu_fw->data;
-	adev->gfx.imu_fw_version = le32_to_cpu(imu_hdr->header.ucode_version);
 	//adev->gfx.imu_feature_version = le32_to_cpu(imu_hdr->ucode_feature_version);
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
@@ -68,7 +68,8 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
 		info->fw = adev->gfx.imu_fw;
 		adev->firmware.fw_size +=
 			ALIGN(le32_to_cpu(imu_hdr->imu_dram_ucode_size_bytes), PAGE_SIZE);
-	}
+	} else
+		adev->gfx.imu_fw_version = le32_to_cpu(imu_hdr->header.ucode_version);
 out:
 	if (err) {
@@ -352,7 +353,7 @@ static void imu_v11_0_program_rlc_ram(struct amdgpu_device *adev)
 	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX, 0x2);
-	switch (adev->ip_versions[GC_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 	case IP_VERSION(11, 0, 0):
 		program_imu_rlc_ram(adev, imu_rlc_ram_golden_11,
 				    (const u32)ARRAY_SIZE(imu_rlc_ram_golden_11));
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index aadb74de5..e67a33745 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -128,7 +128,7 @@ static int jpeg_v2_5_sw_init(void *handle)
 		ring = adev->jpeg.inst[i].ring_dec;
 		ring->use_doorbell = true;
-		if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
+		if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(2, 5, 0))
 			ring->vm_hub = AMDGPU_MMHUB1(0);
 		else
 			ring->vm_hub = AMDGPU_MMHUB0(0);
@@ -822,7 +822,7 @@ static struct amdgpu_jpeg_ras jpeg_v2_6_ras = {
 static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev)
 {
-	switch (adev->ip_versions[JPEG_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, JPEG_HWIP, 0)) {
 	case IP_VERSION(2, 6, 0):
 		adev->jpeg.ras = &jpeg_v2_6_ras;
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index df4440c21..a92481da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -52,7 +52,7 @@ static int jpeg_v3_0_early_init(void *handle)
 	u32 harvest;
-	switch (adev->ip_versions[UVD_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
 	case IP_VERSION(3, 1, 1):
 	case IP_VERSION(3, 1, 2):
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index 3eb3dcd56..bc38b90f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -431,6 +431,10 @@ static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev)
 	end.cmd_header.command_type = MMSCH_COMMAND__END;
+	size = sizeof(struct mmsch_v4_0_init_header);
+	table_loc = (uint32_t *)table->cpu_addr;
+	memcpy(&header, (void *)table_loc, size);
+
 	header.version = MMSCH_VERSION;
 	header.total_size = RREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE);
@@ -468,6 +472,9 @@ static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev)
 	table_loc = (uint32_t *)table->cpu_addr;
 	memcpy((void *)table_loc, &header, size);
+	/* Perform HDP flush before writing to MMSCH registers */
+	amdgpu_device_flush_hdp(adev, NULL);
+
 	/* message MMSCH (in VCN[0]) to initialize this client
 	 * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
 	 * of memory descriptor location
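Why that flush matters: the init table was just filled through a CPU mapping, and the MMSCH microcontroller reads it from the GPU side as soon as the mailbox is kicked, so the HDP flush makes the CPU writes visible to the engine first. The ordering, sketched (the ctx-address register names follow the comment above and are an assumption):

    memcpy((void *)table_loc, &header, size);   /* CPU writes the table   */
    amdgpu_device_flush_hdp(adev, NULL);        /* make writes GPU-visible */
    /* only now tell MMSCH where the descriptor lives and start it */
    WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(table->gpu_addr));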
@@ -515,8 +522,11 @@ static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev)
 			return -EBUSY;
 		}
 	}
-	if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE && init_status != MMSCH_VF_ENGINE_STATUS__PASS)
+	if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
+	    && init_status != MMSCH_VF_ENGINE_STATUS__PASS) {
 		DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for jpeg: %x\n", resp, init_status);
+		return -EINVAL;
+	}
 	return 0;
@@ -831,7 +841,7 @@ static struct amdgpu_jpeg_ras jpeg_v4_0_ras = {
 static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev)
 {
-	switch (adev->ip_versions[JPEG_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, JPEG_HWIP, 0)) {
 	case IP_VERSION(4, 0, 0):
 		adev->jpeg.ras = &jpeg_v4_0_ras;
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 1de79d660..82b6b62c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -654,9 +654,11 @@ static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring)
 */
 static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
 {
-	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */
+	if (!amdgpu_sriov_vf(ring->adev)) {
+		amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+			0, 0, PACKETJ_TYPE0));
+		amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */
+	}
 	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
 		0, 0, PACKETJ_TYPE0));
@@ -672,9 +674,11 @@ static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
 */
 static void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
 {
-	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x62a04);
+	if (!amdgpu_sriov_vf(ring->adev)) {
+		amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+			0, 0, PACKETJ_TYPE0));
+		amdgpu_ring_write(ring, 0x62a04);
+	}
 	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
 		0, 0, PACKETJ_TYPE0));
@@ -950,6 +954,11 @@ static int jpeg_v4_0_3_set_powergating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
+	if (amdgpu_sriov_vf(adev)) {
+		adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
+		return 0;
+	}
+
 	if (state == adev->jpeg.cur_state)
 		return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
new file mode 100644
index 000000000..6ede85b28
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -0,0 +1,623 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "amdgpu_pm.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "jpeg_v2_0.h"
+#include "jpeg_v4_0_5.h"
+#include "mmsch_v4_0.h"
+
+#include "vcn/vcn_4_0_5_offset.h"
+#include "vcn/vcn_4_0_5_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
+
+#define regUVD_JPEG_PITCH_INTERNAL_OFFSET		0x401f
+
+static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev);
+static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
+static int jpeg_v4_0_5_set_powergating_state(void *handle,
+				enum amd_powergating_state state);
+
+static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring);
+
+/**
+ * jpeg_v4_0_5_early_init - set function pointers
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ */
+static int jpeg_v4_0_5_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+
+	adev->jpeg.num_jpeg_inst = 1;
+	adev->jpeg.num_jpeg_rings = 1;
+
+	jpeg_v4_0_5_set_dec_ring_funcs(adev);
+	jpeg_v4_0_5_set_irq_funcs(adev);
+
+	return 0;
+}
+
+/**
+ * jpeg_v4_0_5_sw_init - sw init for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Load firmware and sw initialization
+ */
+static int jpeg_v4_0_5_sw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_ring *ring;
+	int r;
+
+	/* JPEG TRAP */
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+		VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
+	if (r)
+		return r;
+
+	/* JPEG DJPEG POISON EVENT */
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+		VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq);
+	if (r)
+		return r;
+
+	/* JPEG EJPEG POISON EVENT */
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+		VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq);
+	if (r)
+		return r;
+
+	r = amdgpu_jpeg_sw_init(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_jpeg_resume(adev);
+	if (r)
+		return r;
+
+	ring = adev->jpeg.inst->ring_dec;
+	ring->use_doorbell = true;
+	ring->doorbell_index = amdgpu_sriov_vf(adev) ?
+		(((adev->doorbell_index.vcn.vcn_ring0_1) << 1) + 4) :
+		((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1);
+	ring->vm_hub = AMDGPU_MMHUB0(0);
+
+	sprintf(ring->name, "jpeg_dec");
+	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
+			     AMDGPU_RING_PRIO_DEFAULT, NULL);
+	if (r)
+		return r;
+
+	adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
+	adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);
+
+	return 0;
+}
+
+/**
+ * jpeg_v4_0_5_sw_fini - sw fini for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * JPEG suspend and free up sw allocation
+ */
+static int jpeg_v4_0_5_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = amdgpu_jpeg_suspend(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_jpeg_sw_fini(adev);
+
+	return r;
+}
+
+/**
+ * jpeg_v4_0_5_hw_init - start and test JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ */
+static int jpeg_v4_0_5_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
+	int r;
+
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
+		return r;
+
+	DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");
+
+	return 0;
+}
+
+/**
+ * jpeg_v4_0_5_hw_fini - stop the hardware block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Stop the JPEG block, mark ring as not ready any more
+ */
+static int jpeg_v4_0_5_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	cancel_delayed_work_sync(&adev->vcn.idle_work);
+	if (!amdgpu_sriov_vf(adev)) {
+		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+		    RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
+			jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+	}
+	amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0);
+
+	return 0;
+}
+
+/**
+ * jpeg_v4_0_5_suspend - suspend JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * HW fini and suspend JPEG block
+ */
+static int jpeg_v4_0_5_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = jpeg_v4_0_5_hw_fini(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_jpeg_suspend(adev);
+
+	return r;
+}
+
+/**
+ * jpeg_v4_0_5_resume - resume JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Resume firmware and hw init JPEG block
+ */
+static int jpeg_v4_0_5_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = amdgpu_jpeg_resume(adev);
+	if (r)
+		return r;
+
+	r = jpeg_v4_0_5_hw_init(adev);
+
+	return r;
+}
+
+static void jpeg_v4_0_5_disable_clock_gating(struct amdgpu_device *adev)
+{
+	uint32_t data = 0;
+
+	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
+	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
+		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+		data &= (~JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK);
+	} else {
+		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+	}
+
+	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+	WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);
+
+	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
+	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
+		| JPEG_CGC_GATE__JPEG2_DEC_MASK
+		| JPEG_CGC_GATE__JMCIF_MASK
+		| JPEG_CGC_GATE__JRBBM_MASK);
+	WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
+}
+
+static void jpeg_v4_0_5_enable_clock_gating(struct amdgpu_device *adev)
+{
+	uint32_t data = 0;
+
+	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
+	if (adev->cg_flags
	    & AMD_CG_SUPPORT_JPEG_MGCG) {
+		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+		data |= JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK;
+	} else {
+		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+	}
+
+	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+	WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);
+
+	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
+	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
+		|JPEG_CGC_GATE__JPEG2_DEC_MASK
+		|JPEG_CGC_GATE__JMCIF_MASK
+		|JPEG_CGC_GATE__JRBBM_MASK);
+	WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
+}
+
+static int jpeg_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev)
+{
+	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
+		WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
+			1 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
+		SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
+			0, UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
+	}
+
+	/* disable anti hang mechanism */
+	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
+		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+	/* keep the JPEG in static PG mode */
+	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
+		~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
+
+	return 0;
+}
+
+static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev)
+{
+	/* enable anti hang mechanism */
+	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS),
+		UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
+		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
+		WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
+			2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
+		SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
+			1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
+			UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
+	}
+
+	return 0;
+}
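Both static power-gating helpers above follow one hardware handshake: request an ONO1 power state through UVD_IPX_DLDO_CONFIG, then poll UVD_IPX_DLDO_STATUS until the power island reports it (as used above, config 2 / status 1 powers the island down, config 1 / status 0 powers it up). Reduced to its shape, with cfg/sts as illustrative stand-ins:

    /* request a power state, then wait for the status field to match */
    WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
           cfg << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
    SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
                       sts << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
                       UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);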
+
+/**
+ * jpeg_v4_0_5_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the JPEG block
+ */
+static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
+	int r;
+
+	if (adev->pm.dpm_enabled)
+		amdgpu_dpm_enable_jpeg(adev, true);
+
+	/* doorbell programming is done for every playback */
+	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+		(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
+
+	WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
+		ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
+		VCN_JPEG_DB_CTRL__EN_MASK);
+
+	/* disable power gating */
+	r = jpeg_v4_0_5_disable_static_power_gating(adev);
+	if (r)
+		return r;
+
+	/* JPEG disable CGC */
+	jpeg_v4_0_5_disable_clock_gating(adev);
+
+	/* MJPEG global tiling registers */
+	WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
+		adev->gfx.config.gb_addr_config);
+
+
+	/* enable JMI channel */
+	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0,
+		~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+	/* enable System Interrupt for JRBC */
+	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN),
+		JPEG_SYS_INT_EN__DJRBC_MASK,
+		~JPEG_SYS_INT_EN__DJRBC_MASK);
+
+	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0);
+	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+		lower_32_bits(ring->gpu_addr));
+	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+		upper_32_bits(ring->gpu_addr));
+	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR, 0);
+	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, 0);
+	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, 0x00000002L);
+	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
+	ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
+
+	return 0;
+}
+
+/**
+ * jpeg_v4_0_5_stop - stop JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * stop the JPEG block
+ */
+static int jpeg_v4_0_5_stop(struct amdgpu_device *adev)
+{
+	int r;
+
+	/* reset JMI */
+	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL),
+		UVD_JMI_CNTL__SOFT_RESET_MASK,
+		~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+	jpeg_v4_0_5_enable_clock_gating(adev);
+
+	/* enable power gating */
+	r = jpeg_v4_0_5_enable_static_power_gating(adev);
+	if (r)
+		return r;
+
+	if (adev->pm.dpm_enabled)
+		amdgpu_dpm_enable_jpeg(adev, false);
+
+	return 0;
+}
+
+/**
+ * jpeg_v4_0_5_dec_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t jpeg_v4_0_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR);
+}
+
+/**
+ * jpeg_v4_0_5_dec_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t jpeg_v4_0_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring->use_doorbell)
+		return *ring->wptr_cpu_addr;
+	else
+		return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * jpeg_v4_0_5_dec_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring->use_doorbell) {
+		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
+		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+	} else {
+		WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+	}
+}
+
+static bool jpeg_v4_0_5_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret = 1;
+
+	ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) &
+		UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
+		UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
+
+	return ret;
+}
+
+static int jpeg_v4_0_5_wait_for_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS,
+		UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
+		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+}
+
+static int jpeg_v4_0_5_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool enable = (state == AMD_CG_STATE_GATE) ?
		true : false;
+
+	if (enable) {
+		if (!jpeg_v4_0_5_is_idle(handle))
+			return -EBUSY;
+		jpeg_v4_0_5_enable_clock_gating(adev);
+	} else {
+		jpeg_v4_0_5_disable_clock_gating(adev);
+	}
+
+	return 0;
+}
+
+static int jpeg_v4_0_5_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret;
+
+	if (amdgpu_sriov_vf(adev)) {
+		adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
+		return 0;
+	}
+
+	if (state == adev->jpeg.cur_state)
+		return 0;
+
+	if (state == AMD_PG_STATE_GATE)
+		ret = jpeg_v4_0_5_stop(adev);
+	else
+		ret = jpeg_v4_0_5_start(adev);
+
+	if (!ret)
+		adev->jpeg.cur_state = state;
+
+	return ret;
+}
+
+static int jpeg_v4_0_5_set_interrupt_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned type,
+					enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
+static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
+				      struct amdgpu_irq_src *source,
+				      struct amdgpu_iv_entry *entry)
+{
+	DRM_DEBUG("IH: JPEG TRAP\n");
+
+	switch (entry->src_id) {
+	case VCN_4_0__SRCID__JPEG_DECODE:
+		amdgpu_fence_process(adev->jpeg.inst->ring_dec);
+		break;
+	case VCN_4_0__SRCID_DJPEG0_POISON:
+	case VCN_4_0__SRCID_EJPEG0_POISON:
+		amdgpu_jpeg_process_poison_irq(adev, source, entry);
+		break;
+	default:
+		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
+			      entry->src_id, entry->src_data[0]);
+		break;
+	}
+
+	return 0;
+}
+
+static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
+	.name = "jpeg_v4_0_5",
+	.early_init = jpeg_v4_0_5_early_init,
+	.late_init = NULL,
+	.sw_init = jpeg_v4_0_5_sw_init,
+	.sw_fini = jpeg_v4_0_5_sw_fini,
+	.hw_init = jpeg_v4_0_5_hw_init,
+	.hw_fini = jpeg_v4_0_5_hw_fini,
+	.suspend = jpeg_v4_0_5_suspend,
+	.resume = jpeg_v4_0_5_resume,
+	.is_idle = jpeg_v4_0_5_is_idle,
+	.wait_for_idle = jpeg_v4_0_5_wait_for_idle,
+	.check_soft_reset = NULL,
+	.pre_soft_reset = NULL,
+	.soft_reset = NULL,
+	.post_soft_reset = NULL,
+	.set_clockgating_state = jpeg_v4_0_5_set_clockgating_state,
+	.set_powergating_state = jpeg_v4_0_5_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
+	.type = AMDGPU_RING_TYPE_VCN_JPEG,
+	.align_mask = 0xf,
+	.get_rptr = jpeg_v4_0_5_dec_ring_get_rptr,
+	.get_wptr = jpeg_v4_0_5_dec_ring_get_wptr,
+	.set_wptr = jpeg_v4_0_5_dec_ring_set_wptr,
+	.emit_frame_size =
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+		8 + /* jpeg_v4_0_5_dec_ring_emit_vm_flush */
+		18 + 18 + /* jpeg_v4_0_5_dec_ring_emit_fence x2 vm fence */
+		8 + 16,
+	.emit_ib_size = 22, /* jpeg_v4_0_5_dec_ring_emit_ib */
+	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
+	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
+	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
+	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
+	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
+	.insert_nop = jpeg_v2_0_dec_ring_nop,
+	.insert_start = jpeg_v2_0_dec_ring_insert_start,
+	.insert_end = jpeg_v2_0_dec_ring_insert_end,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.begin_use = amdgpu_jpeg_ring_begin_use,
+	.end_use = amdgpu_jpeg_ring_end_use,
+	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
+	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
+{
+	adev->jpeg.inst->ring_dec->funcs = &jpeg_v4_0_5_dec_ring_vm_funcs;
+	DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
+}
+
+static const struct
	amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = {
+	.set = jpeg_v4_0_5_set_interrupt_state,
+	.process = jpeg_v4_0_5_process_interrupt,
+};
+
+static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->jpeg.inst->irq.num_types = 1;
+	adev->jpeg.inst->irq.funcs = &jpeg_v4_0_5_irq_funcs;
+}
+
+const struct amdgpu_ip_block_version jpeg_v4_0_5_ip_block = {
+	.type = AMD_IP_BLOCK_TYPE_JPEG,
+	.major = 4,
+	.minor = 0,
+	.rev = 5,
+	.funcs = &jpeg_v4_0_5_ip_funcs,
+};
+
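Exporting the amdgpu_ip_block_version is only half of the wiring: a matching case in the IP discovery code is what actually instantiates the block on parts whose discovery table reports JPEG/VCN 4.0.5. Sketch of the usual hookup (the real switch lives in amdgpu_discovery.c, outside this excerpt):

    /* inside the media-IP switch in amdgpu_discovery.c (sketch) */
    case IP_VERSION(4, 0, 5):
            amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
            break;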
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.h
new file mode 100644
index 000000000..c5eee5720
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __JPEG_V4_0_5_H__
+#define __JPEG_V4_0_5_H__
+
+enum amdgpu_jpeg_v4_0_5_sub_block {
+	AMDGPU_JPEG_V4_0_5_JPEG0 = 0,
+
+	AMDGPU_JPEG_V4_0_5_MAX_SUB_BLOCK,
+};
+
+extern const struct amdgpu_ip_block_version jpeg_v4_0_5_ip_block;
+
+#endif /* __JPEG_V4_0_5_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index eb06d7498..1e5ad1e08 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -558,7 +558,7 @@ static int mes_v10_1_load_microcode(struct amdgpu_device *adev,
 	WREG32_SOC15(GC, 0, mmCP_MES_MDBOUND_LO, 0x3FFFF);
 	/* invalidate ICACHE */
-	switch (adev->ip_versions[GC_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 	case IP_VERSION(10, 3, 0):
 		data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid);
 		break;
@@ -568,7 +568,7 @@ static int mes_v10_1_load_microcode(struct amdgpu_device *adev,
 	}
 	data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
 	data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
-	switch (adev->ip_versions[GC_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 	case IP_VERSION(10, 3, 0):
 		WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid, data);
 		break;
@@ -578,7 +578,7 @@ static int mes_v10_1_load_microcode(struct amdgpu_device *adev,
 	/* prime the ICACHE. */
-	switch (adev->ip_versions[GC_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 	case IP_VERSION(10, 3, 0):
 		data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid);
 		break;
@@ -587,7 +587,7 @@ static int mes_v10_1_load_microcode(struct amdgpu_device *adev,
 		break;
 	}
 	data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
-	switch (adev->ip_versions[GC_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 	case IP_VERSION(10, 3, 0):
 		WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid, data);
 		break;
@@ -995,7 +995,7 @@ static void mes_v10_1_kiq_setting(struct amdgpu_ring *ring)
 	struct amdgpu_device *adev = ring->adev;
 	/* tell RLC which is KIQ queue */
-	switch (adev->ip_versions[GC_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 	case IP_VERSION(10, 3, 0):
 	case IP_VERSION(10, 3, 2):
 	case IP_VERSION(10, 3, 1):
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 6827d5470..4dfec56e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -47,6 +47,9 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes1.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes_2.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes1.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_0_mes_2.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_0_mes1.bin");
+
 static int mes_v11_0_hw_fini(void *handle);
 static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
@@ -403,6 +406,7 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
 	mes_set_hw_res_pkt.disable_mes_log = 1;
 	mes_set_hw_res_pkt.use_different_vmid_compute = 1;
 	mes_set_hw_res_pkt.enable_reg_active_poll = 1;
+	mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
 	mes_set_hw_res_pkt.oversubscription_timer = 50;
 	return mes_v11_0_submit_pkt_and_poll_completion(mes,
@@ -410,60 +414,6 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
 			offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
 }
-static void mes_v11_0_init_aggregated_doorbell(struct amdgpu_mes *mes)
-{
-	struct amdgpu_device *adev = mes->adev;
-	uint32_t data;
-
-	data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL1);
-	data &= ~(CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET_MASK |
-		  CP_MES_DOORBELL_CONTROL1__DOORBELL_EN_MASK |
-		  CP_MES_DOORBELL_CONTROL1__DOORBELL_HIT_MASK);
-	data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_LOW] <<
-		CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET__SHIFT;
-	data |= 1 << CP_MES_DOORBELL_CONTROL1__DOORBELL_EN__SHIFT;
-	WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL1, data);
-
-	data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL2);
-	data &= ~(CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET_MASK |
-		  CP_MES_DOORBELL_CONTROL2__DOORBELL_EN_MASK |
-		  CP_MES_DOORBELL_CONTROL2__DOORBELL_HIT_MASK);
-	data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_NORMAL] <<
-		CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET__SHIFT;
-	data |= 1 << CP_MES_DOORBELL_CONTROL2__DOORBELL_EN__SHIFT;
-	WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL2, data);
-
-	data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL3);
-	data &= ~(CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET_MASK |
-		  CP_MES_DOORBELL_CONTROL3__DOORBELL_EN_MASK |
-		  CP_MES_DOORBELL_CONTROL3__DOORBELL_HIT_MASK);
-	data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_MEDIUM] <<
-		CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET__SHIFT;
-	data |= 1 << CP_MES_DOORBELL_CONTROL3__DOORBELL_EN__SHIFT;
-	WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL3, data);
-
-	data = RREG32_SOC15(GC, 0,
regCP_MES_DOORBELL_CONTROL4); - data &= ~(CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET_MASK | - CP_MES_DOORBELL_CONTROL4__DOORBELL_EN_MASK | - CP_MES_DOORBELL_CONTROL4__DOORBELL_HIT_MASK); - data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_HIGH] << - CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET__SHIFT; - data |= 1 << CP_MES_DOORBELL_CONTROL4__DOORBELL_EN__SHIFT; - WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL4, data); - - data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL5); - data &= ~(CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET_MASK | - CP_MES_DOORBELL_CONTROL5__DOORBELL_EN_MASK | - CP_MES_DOORBELL_CONTROL5__DOORBELL_HIT_MASK); - data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_REALTIME] << - CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET__SHIFT; - data |= 1 << CP_MES_DOORBELL_CONTROL5__DOORBELL_EN__SHIFT; - WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL5, data); - - data = 1 << CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN__SHIFT; - WREG32_SOC15(GC, 0, regCP_HQD_GFX_CONTROL, data); -} - static const struct amdgpu_mes_funcs mes_v11_0_funcs = { .add_hw_queue = mes_v11_0_add_hw_queue, .remove_hw_queue = mes_v11_0_remove_hw_queue, @@ -1239,8 +1189,6 @@ static int mes_v11_0_hw_init(void *handle) if (r) goto failure; - mes_v11_0_init_aggregated_doorbell(&adev->mes); - r = mes_v11_0_query_sched_status(&adev->mes); if (r) { DRM_ERROR("MES is busy\n"); @@ -1313,7 +1261,7 @@ static int mes_v11_0_late_init(void *handle) /* it's only intended for use in mes_self_test case, not for s0ix and reset */ if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend && - (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))) + (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3))) amdgpu_mes_self_test(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index fb91b3105..e3ddd22aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -96,7 +96,9 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); - if (adev->apu_flags & AMD_APU_IS_RAVEN2) + if (adev->apu_flags & (AMD_APU_IS_RAVEN2 | + AMD_APU_IS_RENOIR | + AMD_APU_IS_GREEN_SARDINE)) /* * Raven2 has a HW issue that it is unable to use the vram which * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. 
So here is the @@ -242,7 +244,7 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) block_size -= 9; for (i = 0; i <= 14; i++) { - tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i); + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i * hub->ctx_distance); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, num_level); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c index 9086f2fdf..92432cd2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c @@ -274,7 +274,7 @@ static void mmhub_v1_7_setup_vmid_config(struct amdgpu_device *adev) block_size -= 9; for (i = 0; i <= 14; i++) { - tmp = RREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_CNTL, i); + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_CNTL, i * hub->ctx_distance); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, num_level); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 3d8e579d5..9b0146732 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -344,7 +344,7 @@ static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) hub = &adev->vmhub[AMDGPU_MMHUB0(j)]; for (i = 0; i <= 14; i++) { tmp = RREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL, - i); + i * hub->ctx_distance); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, @@ -626,6 +626,14 @@ static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev, void *ras_err_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status; + unsigned long ue_count = 0, ce_count = 0; + + /* NOTE: mmhub is converted by aid_mask and the range is 0-3, + * which can be used as die ID directly */ + struct amdgpu_smuio_mcm_config_info mcm_info = { + .socket_id = adev->smuio.funcs->get_socket_id(adev), + .die_id = mmhub_inst, + }; amdgpu_ras_inst_query_ras_error_count(adev, mmhub_v1_8_ce_reg_list, @@ -634,7 +642,7 @@ static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev, ARRAY_SIZE(mmhub_v1_8_ras_memory_list), mmhub_inst, AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, - &err_data->ce_count); + &ce_count); amdgpu_ras_inst_query_ras_error_count(adev, mmhub_v1_8_ue_reg_list, ARRAY_SIZE(mmhub_v1_8_ue_reg_list), @@ -642,7 +650,10 @@ static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev, ARRAY_SIZE(mmhub_v1_8_ras_memory_list), mmhub_inst, AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - &err_data->ue_count); + &ue_count); + + amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count); + amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count); } static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev, @@ -689,152 +700,9 @@ static void mmhub_v1_8_reset_ras_error_count(struct amdgpu_device *adev) mmhub_v1_8_inst_reset_ras_error_count(adev, i); } -static const u32 mmhub_v1_8_mmea_err_status_reg[] __maybe_unused = { - regMMEA0_ERR_STATUS, - regMMEA1_ERR_STATUS, - regMMEA2_ERR_STATUS, - regMMEA3_ERR_STATUS, - regMMEA4_ERR_STATUS, -}; - -static void mmhub_v1_8_inst_query_ras_err_status(struct amdgpu_device *adev, - uint32_t mmhub_inst) -{ - uint32_t reg_value; - uint32_t mmea_err_status_addr_dist; - uint32_t i; - - /* query mmea ras err status */ - mmea_err_status_addr_dist = 
regMMEA1_ERR_STATUS - regMMEA0_ERR_STATUS; - for (i = 0; i < ARRAY_SIZE(mmhub_v1_8_mmea_err_status_reg); i++) { - reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst, - regMMEA0_ERR_STATUS, - i * mmea_err_status_addr_dist); - if (REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_STATUS) || - REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_WRRSP_STATUS) || - REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) { - dev_warn(adev->dev, - "Detected MMEA%d err in MMHUB%d, status: 0x%x\n", - i, mmhub_inst, reg_value); - } - } - - /* query mm_cane ras err status */ - reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS); - if (REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_RDRSP_STATUS) || - REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_WRRSP_STATUS) || - REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_RDRSP_DATAPARITY_ERROR)) { - dev_warn(adev->dev, - "Detected MM CANE err in MMHUB%d, status: 0x%x\n", - mmhub_inst, reg_value); - } -} - -static void mmhub_v1_8_query_ras_error_status(struct amdgpu_device *adev) -{ - uint32_t inst_mask; - uint32_t i; - - if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { - dev_warn(adev->dev, "MMHUB RAS is not supported\n"); - return; - } - - inst_mask = adev->aid_mask; - for_each_inst(i, inst_mask) - mmhub_v1_8_inst_query_ras_err_status(adev, i); -} - -static void mmhub_v1_8_inst_reset_ras_err_status(struct amdgpu_device *adev, - uint32_t mmhub_inst) -{ - uint32_t mmea_cgtt_clk_cntl_addr_dist; - uint32_t mmea_err_status_addr_dist; - uint32_t reg_value; - uint32_t i; - - /* reset mmea ras err status */ - mmea_cgtt_clk_cntl_addr_dist = regMMEA1_CGTT_CLK_CTRL - regMMEA0_CGTT_CLK_CTRL; - mmea_err_status_addr_dist = regMMEA1_ERR_STATUS - regMMEA0_ERR_STATUS; - for (i = 0; i < ARRAY_SIZE(mmhub_v1_8_mmea_err_status_reg); i++) { - /* force clk branch on for response path - * set MMEA0_CGTT_CLK_CTRL.SOFT_OVERRIDE_RETURN = 1 - */ - reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst, - regMMEA0_CGTT_CLK_CTRL, - i * mmea_cgtt_clk_cntl_addr_dist); - reg_value = REG_SET_FIELD(reg_value, MMEA0_CGTT_CLK_CTRL, - SOFT_OVERRIDE_RETURN, 1); - WREG32_SOC15_OFFSET(MMHUB, mmhub_inst, - regMMEA0_CGTT_CLK_CTRL, - i * mmea_cgtt_clk_cntl_addr_dist, - reg_value); - - /* set MMEA0_ERR_STATUS.CLEAR_ERROR_STATUS = 1 */ - reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst, - regMMEA0_ERR_STATUS, - i * mmea_err_status_addr_dist); - reg_value = REG_SET_FIELD(reg_value, MMEA0_ERR_STATUS, - CLEAR_ERROR_STATUS, 1); - WREG32_SOC15_OFFSET(MMHUB, mmhub_inst, - regMMEA0_ERR_STATUS, - i * mmea_err_status_addr_dist, - reg_value); - - /* set MMEA0_CGTT_CLK_CTRL.SOFT_OVERRIDE_RETURN = 0 */ - reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst, - regMMEA0_CGTT_CLK_CTRL, - i * mmea_cgtt_clk_cntl_addr_dist); - reg_value = REG_SET_FIELD(reg_value, MMEA0_CGTT_CLK_CTRL, - SOFT_OVERRIDE_RETURN, 0); - WREG32_SOC15_OFFSET(MMHUB, mmhub_inst, - regMMEA0_CGTT_CLK_CTRL, - i * mmea_cgtt_clk_cntl_addr_dist, - reg_value); - } - - /* reset mm_cane ras err status - * force clk branch on for response path - * set MM_CANE_ICG_CTRL.SOFT_OVERRIDE_ATRET = 1 - */ - reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL); - reg_value = REG_SET_FIELD(reg_value, MM_CANE_ICG_CTRL, - SOFT_OVERRIDE_ATRET, 1); - WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL, reg_value); - - /* set MM_CANE_ERR_STATUS.CLEAR_ERROR_STATUS = 1 */ - reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS); - reg_value = REG_SET_FIELD(reg_value, MM_CANE_ERR_STATUS, - CLEAR_ERROR_STATUS, 
1); - WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS, reg_value); - - /* set MM_CANE_ICG_CTRL.SOFT_OVERRIDE_ATRET = 0 */ - reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL); - reg_value = REG_SET_FIELD(reg_value, MM_CANE_ICG_CTRL, - SOFT_OVERRIDE_ATRET, 0); - WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL, reg_value); -} - -static void mmhub_v1_8_reset_ras_error_status(struct amdgpu_device *adev) -{ - uint32_t inst_mask; - uint32_t i; - - if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { - dev_warn(adev->dev, "MMHUB RAS is not supported\n"); - return; - } - - inst_mask = adev->aid_mask; - for_each_inst(i, inst_mask) - mmhub_v1_8_inst_reset_ras_err_status(adev, i); -} - static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = { .query_ras_error_count = mmhub_v1_8_query_ras_error_count, .reset_ras_error_count = mmhub_v1_8_reset_ras_error_count, - .query_ras_error_status = mmhub_v1_8_query_ras_error_status, - .reset_ras_error_status = mmhub_v1_8_reset_ras_error_status, }; struct amdgpu_mmhub_ras mmhub_v1_8_ras = { diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index 8f76c6ecf..02fd45261 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -151,7 +151,7 @@ mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev, dev_err(adev->dev, "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", status); - switch (adev->ip_versions[MMHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { case IP_VERSION(2, 0, 0): case IP_VERSION(2, 0, 2): mmhub_cid = mmhub_client_ids_navi1x[cid][rw]; @@ -367,7 +367,7 @@ static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) uint32_t tmp; for (i = 0; i <= 14; i++) { - tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i); + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i * hub->ctx_distance); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, adev->vm_manager.num_level); @@ -568,7 +568,7 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) return; - switch (adev->ip_versions[MMHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { case IP_VERSION(2, 1, 0): case IP_VERSION(2, 1, 1): case IP_VERSION(2, 1, 2): @@ -601,7 +601,7 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); } - switch (adev->ip_versions[MMHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { case IP_VERSION(2, 1, 0): case IP_VERSION(2, 1, 1): case IP_VERSION(2, 1, 2): @@ -625,7 +625,7 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) return; - switch (adev->ip_versions[MMHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { case IP_VERSION(2, 1, 0): case IP_VERSION(2, 1, 1): case IP_VERSION(2, 1, 2): @@ -651,7 +651,7 @@ static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->ip_versions[MMHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { case IP_VERSION(2, 0, 0): case IP_VERSION(2, 0, 2): case IP_VERSION(2, 1, 0): @@ -676,7 +676,7 @@ static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags) if (amdgpu_sriov_vf(adev)) *flags = 0; - switch 
(adev->ip_versions[MMHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { case IP_VERSION(2, 1, 0): case IP_VERSION(2, 1, 1): case IP_VERSION(2, 1, 2): diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c index 1dce053a4..5eb8122e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c @@ -90,7 +90,7 @@ mmhub_v2_3_print_l2_protection_fault_status(struct amdgpu_device *adev, dev_err(adev->dev, "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", status); - switch (adev->ip_versions[MMHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { case IP_VERSION(2, 3, 0): case IP_VERSION(2, 4, 0): case IP_VERSION(2, 4, 1): @@ -285,7 +285,7 @@ static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev) uint32_t tmp; for (i = 0; i <= 14; i++) { - tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i); + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i * hub->ctx_distance); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, adev->vm_manager.num_level); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c index 441379e91..7d5242df5 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c @@ -107,7 +107,7 @@ mmhub_v3_0_print_l2_protection_fault_status(struct amdgpu_device *adev, dev_err(adev->dev, "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", status); - switch (adev->ip_versions[MMHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { case IP_VERSION(3, 0, 0): case IP_VERSION(3, 0, 1): mmhub_cid = mmhub_client_ids_v3_0_0[cid][rw]; @@ -189,8 +189,7 @@ static void mmhub_v3_0_init_system_aperture_regs(struct amdgpu_device *adev) max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); /* Set default page address. */ - value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start + - adev->vm_manager.vram_base_offset; + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, @@ -324,7 +323,7 @@ static void mmhub_v3_0_setup_vmid_config(struct amdgpu_device *adev) uint32_t tmp; for (i = 0; i <= 14; i++) { - tmp = RREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT1_CNTL, i); + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT1_CNTL, i * hub->ctx_distance); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, adev->vm_manager.num_level); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c index 12c7f4b46..134c4ec10 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c @@ -108,7 +108,7 @@ mmhub_v3_0_1_print_l2_protection_fault_status(struct amdgpu_device *adev, "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", status); - switch (adev->ip_versions[MMHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { case IP_VERSION(3, 0, 1): mmhub_cid = mmhub_client_ids_v3_0_1[cid][rw]; break; @@ -188,8 +188,7 @@ static void mmhub_v3_0_1_init_system_aperture_regs(struct amdgpu_device *adev) max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); /* Set default page address. 
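* amdgpu_gmc_vram_mc2pa() folds the mc_addr - vram_start + vram_base_offset conversion that was open-coded on the removed line into one helper, so the aperture registers still receive a physical address.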
*/ - value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start + - adev->vm_manager.vram_base_offset; + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, @@ -311,7 +310,7 @@ static void mmhub_v3_0_1_setup_vmid_config(struct amdgpu_device *adev) uint32_t tmp; for (i = 0; i <= 14; i++) { - tmp = RREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT1_CNTL, i); + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT1_CNTL, i * hub->ctx_distance); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, adev->vm_manager.num_level); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c index 5dadc85ab..f0f182f03 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c @@ -181,8 +181,7 @@ static void mmhub_v3_0_2_init_system_aperture_regs(struct amdgpu_device *adev) } /* Set default page address. */ - value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start + - adev->vm_manager.vram_base_offset; + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, @@ -316,7 +315,7 @@ static void mmhub_v3_0_2_setup_vmid_config(struct amdgpu_device *adev) uint32_t tmp; for (i = 0; i <= 14; i++) { - tmp = RREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT1_CNTL, i); + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT1_CNTL, i * hub->ctx_distance); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, adev->vm_manager.num_level); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c new file mode 100644 index 000000000..dc4812ecc --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c @@ -0,0 +1,589 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "amdgpu.h" +#include "mmhub_v3_3.h" + +#include "mmhub/mmhub_3_3_0_offset.h" +#include "mmhub/mmhub_3_3_0_sh_mask.h" + +#include "navi10_enum.h" +#include "soc15_common.h" + +#define regMMVM_L2_CNTL3_DEFAULT 0x80100007 +#define regMMVM_L2_CNTL4_DEFAULT 0x000000c1 +#define regMMVM_L2_CNTL5_DEFAULT 0x00003fe0 + +static const char *mmhub_client_ids_v3_3[][2] = { + [0][0] = "VMC", + [4][0] = "DCEDMC", + [6][0] = "MP0", + [7][0] = "MP1", + [8][0] = "MPM", + [24][0] = "HDP", + [25][0] = "LSDMA", + [26][0] = "JPEG", + [27][0] = "VPE", + [29][0] = "VCNU", + [30][0] = "VCN", + [3][1] = "DCEDWB", + [4][1] = "DCEDMC", + [6][1] = "MP0", + [7][1] = "MP1", + [8][1] = "MPM", + [21][1] = "OSSSYS", + [24][1] = "HDP", + [25][1] = "LSDMA", + [26][1] = "JPEG", + [27][1] = "VPE", + [29][1] = "VCNU", + [30][1] = "VCN", +}; + +static uint32_t mmhub_v3_3_get_invalidate_req(unsigned int vmid, + uint32_t flush_type) +{ + u32 req = 0; + + /* invalidate using legacy mode on vmid*/ + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, + PER_VMID_INVALIDATE_REQ, 1 << vmid); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type ? : 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, + CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0); + + return req; +} + +static void +mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev, + uint32_t status) +{ + uint32_t cid, rw; + const char *mmhub_cid = NULL; + + cid = REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, CID); + rw = REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, RW); + + dev_err(adev->dev, + "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", + status); + + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { + case IP_VERSION(3, 3, 0): + mmhub_cid = mmhub_client_ids_v3_3[cid][rw]; + break; + default: + mmhub_cid = NULL; + break; + } + + if (!mmhub_cid && cid == 0x140) + mmhub_cid = "UMSCH"; + + dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", + mmhub_cid ? 
mmhub_cid : "unknown", cid); + dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", + REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS)); + dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", + REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR)); + dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", + REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS)); + dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", + REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR)); + dev_err(adev->dev, "\t RW: 0x%x\n", rw); +} + +static void mmhub_v3_3_setup_vm_pt_regs(struct amdgpu_device *adev, + uint32_t vmid, + uint64_t page_table_base) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; + + WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + hub->ctx_addr_distance * vmid, + lower_32_bits(page_table_base)); + + WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + hub->ctx_addr_distance * vmid, + upper_32_bits(page_table_base)); + +} + +static void mmhub_v3_3_init_gart_aperture_regs(struct amdgpu_device *adev) +{ + uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + + mmhub_v3_3_setup_vm_pt_regs(adev, 0, pt_base); + + WREG32_SOC15(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.gart_start >> 12)); + WREG32_SOC15(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.gart_start >> 44)); + + WREG32_SOC15(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + (u32)(adev->gmc.gart_end >> 12)); + WREG32_SOC15(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); +} + +static void mmhub_v3_3_init_system_aperture_regs(struct amdgpu_device *adev) +{ + uint64_t value; + uint32_t tmp; + + /* Program the AGP BAR */ + WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0); + WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + + /* + * the new L1 policy will block SRIOV guest from writing + * these regs, and they will be programmed at host. + * so skip programming these regs. + */ + /* Program the system aperture low logical page number. */ + WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR, + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); + WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); + + /* Set default page address. */ + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); + WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, + (u32)(value >> 12)); + WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, + (u32)(value >> 44)); + + /* Program "protection fault".
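+ * Faulting translations are redirected to the dummy page programmed into the DEFAULT_ADDR registers below; see mmhub_v3_3_set_fault_enable_default() for the per-fault-type enables.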
*/ + WREG32_SOC15(MMHUB, 0, regMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, + (u32)(adev->dummy_page_addr >> 12)); + WREG32_SOC15(MMHUB, 0, regMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, + (u32)((u64)adev->dummy_page_addr >> 44)); + + tmp = RREG32_SOC15(MMHUB, 0, regMMVM_L2_PROTECTION_FAULT_CNTL2); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL2, + ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); + WREG32_SOC15(MMHUB, 0, regMMVM_L2_PROTECTION_FAULT_CNTL2, tmp); +} + +static void mmhub_v3_3_init_tlb_regs(struct amdgpu_device *adev) +{ + uint32_t tmp; + + /* Setup TLB control */ + tmp = RREG32_SOC15(MMHUB, 0, regMMMC_VM_MX_L1_TLB_CNTL); + + tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); + tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3); + tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 1); + tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, + SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); + tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); + tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, + MTYPE, MTYPE_UC); /* UC, uncached */ + + WREG32_SOC15(MMHUB, 0, regMMMC_VM_MX_L1_TLB_CNTL, tmp); +} + +static void mmhub_v3_3_init_cache_regs(struct amdgpu_device *adev) +{ + uint32_t tmp; + + /* Setup L2 cache */ + tmp = RREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, + ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1); + /* XXX for emulation, Refer to closed source code.*/ + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE, + 0); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0); + WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL, tmp); + + tmp = RREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL2); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); + WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL2, tmp); + + tmp = regMMVM_L2_CNTL3_DEFAULT; + if (adev->gmc.translate_further) { + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 9); + } else { + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 6); + } + WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL3, tmp); + + tmp = regMMVM_L2_CNTL4_DEFAULT; + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0); + WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL4, tmp); + + tmp = regMMVM_L2_CNTL5_DEFAULT; + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0); + WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL5, tmp); +} + +static void mmhub_v3_3_enable_system_domain(struct amdgpu_device *adev) +{ + uint32_t tmp; + + tmp = RREG32_SOC15(MMHUB, 0, regMMVM_CONTEXT0_CNTL); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); + + WREG32_SOC15(MMHUB, 0, regMMVM_CONTEXT0_CNTL, tmp); +} + +static void mmhub_v3_3_disable_identity_aperture(struct amdgpu_device *adev) +{ 
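+ /* Program an inverted range (low bound above the high bound) so no address can match the context1 identity aperture. */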
+ WREG32_SOC15(MMHUB, 0, + regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, + 0xFFFFFFFF); + WREG32_SOC15(MMHUB, 0, + regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32, + 0x0000000F); + + WREG32_SOC15(MMHUB, 0, + regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0); + WREG32_SOC15(MMHUB, 0, + regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0); + + WREG32_SOC15(MMHUB, 0, regMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, + 0); + WREG32_SOC15(MMHUB, 0, regMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, + 0); +} + +static void mmhub_v3_3_setup_vmid_config(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; + int i; + uint32_t tmp; + + for (i = 0; i <= 14; i++) { + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT1_CNTL, i * hub->ctx_distance); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, + adev->vm_manager.num_level); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, + 1); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, + PAGE_TABLE_BLOCK_SIZE, + adev->vm_manager.block_size - 9); + /* Send no-retry XNACK on fault to suppress VM fault storm. */ + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, + !amdgpu_noretry); + WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT1_CNTL, + i * hub->ctx_distance, tmp); + WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, + i * hub->ctx_addr_distance, + lower_32_bits(adev->vm_manager.max_pfn - 1)); + WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, + i * hub->ctx_addr_distance, + upper_32_bits(adev->vm_manager.max_pfn - 1)); + } + + hub->vm_cntx_cntl = tmp; +} + +static void mmhub_v3_3_program_invalidation(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; + unsigned int i; + + for (i = 0; i < 18; ++i) { + WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, + i * hub->eng_addr_distance, 0xffffffff); + WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, + i * hub->eng_addr_distance, 0x1f); + } +} + +static int mmhub_v3_3_gart_enable(struct amdgpu_device *adev) +{ + /* GART Enable. 
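+ * Apertures, TLB and L2 cache are configured first, then the system domain and identity aperture, and finally the per-VMID contexts and invalidation engines.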
*/ + mmhub_v3_3_init_gart_aperture_regs(adev); + mmhub_v3_3_init_system_aperture_regs(adev); + mmhub_v3_3_init_tlb_regs(adev); + mmhub_v3_3_init_cache_regs(adev); + + mmhub_v3_3_enable_system_domain(adev); + mmhub_v3_3_disable_identity_aperture(adev); + mmhub_v3_3_setup_vmid_config(adev); + mmhub_v3_3_program_invalidation(adev); + + return 0; +} + +static void mmhub_v3_3_gart_disable(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; + u32 tmp; + u32 i; + + /* Disable all tables */ + for (i = 0; i < 16; i++) + WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_CNTL, + i * hub->ctx_distance, 0); + + /* Setup TLB control */ + tmp = RREG32_SOC15(MMHUB, 0, regMMMC_VM_MX_L1_TLB_CNTL); + tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); + tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 0); + WREG32_SOC15(MMHUB, 0, regMMMC_VM_MX_L1_TLB_CNTL, tmp); + + /* Setup L2 cache */ + tmp = RREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 0); + WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL, tmp); + WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL3, 0); +} + +/** + * mmhub_v3_3_set_fault_enable_default - update GART/VM fault handling + * + * @adev: amdgpu_device pointer + * @value: true redirects VM faults to the default page + */ +static void mmhub_v3_3_set_fault_enable_default(struct amdgpu_device *adev, + bool value) +{ + u32 tmp; + + tmp = RREG32_SOC15(MMHUB, 0, regMMVM_L2_PROTECTION_FAULT_CNTL); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + if (!value) { + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_NO_RETRY_FAULT, 1); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_RETRY_FAULT, 1); + } + WREG32_SOC15(MMHUB, 0, regMMVM_L2_PROTECTION_FAULT_CNTL, tmp); +} + +static const struct amdgpu_vmhub_funcs mmhub_v3_3_vmhub_funcs = { + .print_l2_protection_fault_status = mmhub_v3_3_print_l2_protection_fault_status, + .get_invalidate_req = mmhub_v3_3_get_invalidate_req, +}; + +static void mmhub_v3_3_init(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; + + hub->ctx0_ptb_addr_lo32 = + SOC15_REG_OFFSET(MMHUB, 0, + regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32); + hub->ctx0_ptb_addr_hi32 = + SOC15_REG_OFFSET(MMHUB, 0, + regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + 
SOC15_REG_OFFSET(MMHUB, 0, regMMVM_INVALIDATE_ENG0_SEM); + hub->vm_inv_eng0_req = + SOC15_REG_OFFSET(MMHUB, 0, regMMVM_INVALIDATE_ENG0_REQ); + hub->vm_inv_eng0_ack = + SOC15_REG_OFFSET(MMHUB, 0, regMMVM_INVALIDATE_ENG0_ACK); + hub->vm_context0_cntl = + SOC15_REG_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_CNTL); + hub->vm_l2_pro_fault_status = + SOC15_REG_OFFSET(MMHUB, 0, regMMVM_L2_PROTECTION_FAULT_STATUS); + hub->vm_l2_pro_fault_cntl = + SOC15_REG_OFFSET(MMHUB, 0, regMMVM_L2_PROTECTION_FAULT_CNTL); + + hub->ctx_distance = regMMVM_CONTEXT1_CNTL - regMMVM_CONTEXT0_CNTL; + hub->ctx_addr_distance = regMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - + regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + hub->eng_distance = regMMVM_INVALIDATE_ENG1_REQ - + regMMVM_INVALIDATE_ENG0_REQ; + hub->eng_addr_distance = regMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 - + regMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; + + hub->vm_cntx_cntl_vm_fault = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK; + + hub->vmhub_funcs = &mmhub_v3_3_vmhub_funcs; +} + +static u64 mmhub_v3_3_get_fb_location(struct amdgpu_device *adev) +{ + u64 base; + + base = RREG32_SOC15(MMHUB, 0, regMMMC_VM_FB_LOCATION_BASE); + base &= MMMC_VM_FB_LOCATION_BASE__FB_BASE_MASK; + base <<= 24; + + return base; +} + +static u64 mmhub_v3_3_get_mc_fb_offset(struct amdgpu_device *adev) +{ + u64 offset; + + offset = RREG32_SOC15(MMHUB, 0, regMMMC_VM_FB_OFFSET); + offset &= MMMC_VM_FB_OFFSET__FB_OFFSET_MASK; + offset <<= 24; + + return offset; +} + +static void mmhub_v3_3_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data; + + def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG); + + if (enable) + data |= MM_ATC_L2_MISC_CG__ENABLE_MASK; + else + data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK; + + if (def != data) + WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data); +} + +static void mmhub_v3_3_update_medium_grain_light_sleep(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data; + + def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG); + + if (enable) + data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK; + else + data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK; + + if (def != data) + WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data); +} + +static int mmhub_v3_3_set_clockgating(struct amdgpu_device *adev, + enum amd_clockgating_state state) +{ + if (amdgpu_sriov_vf(adev)) + return 0; + + mmhub_v3_3_update_medium_grain_clock_gating(adev, + state == AMD_CG_STATE_GATE); + mmhub_v3_3_update_medium_grain_light_sleep(adev, + state == AMD_CG_STATE_GATE); + return 0; +} + +static void mmhub_v3_3_get_clockgating(struct amdgpu_device *adev, u64 *flags) +{ + int data; + + if (amdgpu_sriov_vf(adev)) + *flags = 0; + + data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG); + + /* AMD_CG_SUPPORT_MC_MGCG */ + if (data & MM_ATC_L2_MISC_CG__ENABLE_MASK) + *flags |= AMD_CG_SUPPORT_MC_MGCG; + + /* AMD_CG_SUPPORT_MC_LS */ + if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK) + *flags |= AMD_CG_SUPPORT_MC_LS; +} + +const struct amdgpu_mmhub_funcs mmhub_v3_3_funcs = { + .init = mmhub_v3_3_init, + .get_fb_location = 
mmhub_v3_3_get_fb_location, + .get_mc_fb_offset = mmhub_v3_3_get_mc_fb_offset, + .gart_enable = mmhub_v3_3_gart_enable, + .set_fault_enable_default = mmhub_v3_3_set_fault_enable_default, + .gart_disable = mmhub_v3_3_gart_disable, + .set_clockgating = mmhub_v3_3_set_clockgating, + .get_clockgating = mmhub_v3_3_get_clockgating, + .setup_vm_pt_regs = mmhub_v3_3_setup_vm_pt_regs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.h new file mode 100644 index 000000000..37b62c7e5 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.h @@ -0,0 +1,29 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __MMHUB_V3_3_H__ +#define __MMHUB_V3_3_H__ + +extern const struct amdgpu_mmhub_funcs mmhub_v3_3_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 5718e4d40..1b7da4aff 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -308,7 +308,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid) for (i = 0; i <= 14; i++) { tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i); + hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i * hub->ctx_distance); tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL, diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h index 796d4f879..ced26cc51 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h @@ -35,13 +35,11 @@ #define MMSCH_VF_ENGINE_STATUS__PASS 0x1 -#define MMSCH_VF_MAILBOX_RESP__OK 0x1 -#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2 - -#define MMSCH_VF_ENGINE_STATUS__PASS 0x1 - -#define MMSCH_VF_MAILBOX_RESP__OK 0x1 -#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2 +#define MMSCH_VF_MAILBOX_RESP__OK 0x1 +#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2 +#define MMSCH_VF_MAILBOX_RESP__FAILED 0x3 +#define MMSCH_VF_MAILBOX_RESP__FAILED_SMALL_CTX_SIZE 0x4 +#define MMSCH_VF_MAILBOX_RESP__UNKNOWN_CMD 0x5 #define MMSCH_V4_0_VCN_INSTANCES 0x2 diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h index af1a78469..c520b2fab 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h @@ -62,7 +62,9 @@ int 
xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev); int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev); void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev); -#define AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL) * 4 -#define AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL) * 4 + 1 +#define AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE \ + (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL) * 4) +#define AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE \ + (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL) * 4 + 1) #endif diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index b6a8478da..de9361472 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -107,7 +107,7 @@ force_update_wptr_for_self_int(struct amdgpu_device *adev, { u32 ih_cntl, ih_rb_cntl; - if (adev->ip_versions[OSSSYS_HWIP][0] < IP_VERSION(5, 0, 3)) + if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) < IP_VERSION(5, 0, 3)) return; ih_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_CNTL2); @@ -330,7 +330,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) { if (ih[0]->use_bus_addr) { - switch (adev->ip_versions[OSSSYS_HWIP][0]) { + switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) { case IP_VERSION(5, 0, 3): case IP_VERSION(5, 2, 0): case IP_VERSION(5, 2, 1): @@ -442,6 +442,12 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev, tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. 
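+ * Leaving the bit set would keep the overflow flag masked, so a later ring-buffer overflow would go undetected.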
+ */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); out: return (wptr & ih->ptr_mask); } @@ -665,8 +671,6 @@ static void navi10_ih_update_clockgating_state(struct amdgpu_device *adev, if (def != data) WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data); } - - return; } static int navi10_ih_set_clockgating_state(void *handle, @@ -691,8 +695,6 @@ static void navi10_ih_get_clockgating_state(void *handle, u64 *flags) if (!RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL)) *flags |= AMD_CG_SUPPORT_IH_CG; - - return; } static const struct amd_ip_funcs navi10_ih_ip_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index ef368ca79..df218d5ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -537,7 +537,7 @@ static void nbio_v2_3_clear_doorbell_interrupt(struct amdgpu_device *adev) { uint32_t reg, reg_data; - if (adev->ip_versions[NBIO_HWIP][0] != IP_VERSION(3, 3, 0)) + if (amdgpu_ip_version(adev, NBIO_HWIP, 0) != IP_VERSION(3, 3, 0)) return; reg = RREG32_SOC15(NBIO, 0, mmBIF_RB_CNTL); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c index e5b5b0f49..a3622897e 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c @@ -338,7 +338,7 @@ const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg = { static void nbio_v4_3_init_registers(struct amdgpu_device *adev) { - if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(4, 3, 0)) { + if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(4, 3, 0)) { uint32_t data; data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2); @@ -392,8 +392,8 @@ static void nbio_v4_3_program_aspm(struct amdgpu_device *adev) #ifdef CONFIG_PCIEASPM uint32_t def, data; - if (!(adev->ip_versions[PCIE_HWIP][0] == IP_VERSION(7, 4, 0)) && - !(adev->ip_versions[PCIE_HWIP][0] == IP_VERSION(7, 6, 0))) + if (!(amdgpu_ip_version(adev, PCIE_HWIP, 0) == IP_VERSION(7, 4, 0)) && + !(amdgpu_ip_version(adev, PCIE_HWIP, 0) == IP_VERSION(7, 6, 0))) return; def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c new file mode 100644 index 000000000..1f52b4b1d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c @@ -0,0 +1,372 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "amdgpu_atombios.h" +#include "nbio_v7_11.h" + +#include "nbio/nbio_7_11_0_offset.h" +#include "nbio/nbio_7_11_0_sh_mask.h" +#include <uapi/linux/kfd_ioctl.h> + +static void nbio_v7_11_remap_hdp_registers(struct amdgpu_device *adev) +{ + WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL, + adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL); + WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL, + adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL); +} + +static u32 nbio_v7_11_get_rev_id(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP1_RCC_DEV0_EPF0_STRAP0); + tmp &= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK; + tmp >>= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT; + + return tmp; +} + +static void nbio_v7_11_mc_access_enable(struct amdgpu_device *adev, bool enable) +{ + if (enable) + WREG32_SOC15(NBIO, 0, regBIF_BX1_BIF_FB_EN, + BIF_BX1_BIF_FB_EN__FB_READ_EN_MASK | + BIF_BX1_BIF_FB_EN__FB_WRITE_EN_MASK); + else + WREG32_SOC15(NBIO, 0, regBIF_BX1_BIF_FB_EN, 0); +} + +static u32 nbio_v7_11_get_memsize(struct amdgpu_device *adev) +{ + return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_CONFIG_MEMSIZE); +} + +static void nbio_v7_11_sdma_doorbell_range(struct amdgpu_device *adev, int instance, + bool use_doorbell, int doorbell_index, + int doorbell_size) +{ + u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_CSDMA_DOORBELL_RANGE); + u32 doorbell_range = RREG32_PCIE_PORT(reg); + + if (use_doorbell) { + doorbell_range = REG_SET_FIELD(doorbell_range, + GDC0_BIF_CSDMA_DOORBELL_RANGE, + OFFSET, doorbell_index); + doorbell_range = REG_SET_FIELD(doorbell_range, + GDC0_BIF_CSDMA_DOORBELL_RANGE, + SIZE, doorbell_size); + } else { + doorbell_range = REG_SET_FIELD(doorbell_range, + GDC0_BIF_CSDMA_DOORBELL_RANGE, + SIZE, 0); + } + + WREG32_PCIE_PORT(reg, doorbell_range); +} + +static void nbio_v7_11_vpe_doorbell_range(struct amdgpu_device *adev, int instance, + bool use_doorbell, int doorbell_index, + int doorbell_size) +{ + u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VPE_DOORBELL_RANGE); + u32 doorbell_range = RREG32_PCIE_PORT(reg); + + if (use_doorbell) { + doorbell_range = REG_SET_FIELD(doorbell_range, + GDC0_BIF_VPE_DOORBELL_RANGE, + OFFSET, doorbell_index); + doorbell_range = REG_SET_FIELD(doorbell_range, + GDC0_BIF_VPE_DOORBELL_RANGE, + SIZE, doorbell_size); + } else { + doorbell_range = REG_SET_FIELD(doorbell_range, + GDC0_BIF_VPE_DOORBELL_RANGE, + SIZE, 0); + } + + WREG32_PCIE_PORT(reg, doorbell_range); +} + +static void nbio_v7_11_vcn_doorbell_range(struct amdgpu_device *adev, + bool use_doorbell, + int doorbell_index, int instance) +{ + u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VCN0_DOORBELL_RANGE); + u32 doorbell_range = RREG32_PCIE_PORT(reg); + + if (use_doorbell) { + doorbell_range = REG_SET_FIELD(doorbell_range, + GDC0_BIF_VCN0_DOORBELL_RANGE, OFFSET, + doorbell_index); + doorbell_range = REG_SET_FIELD(doorbell_range, + GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 8); + } else { + doorbell_range = REG_SET_FIELD(doorbell_range, + GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 0); + } + + WREG32_PCIE_PORT(reg, doorbell_range); +} + +static void nbio_v7_11_enable_doorbell_aperture(struct amdgpu_device *adev, + bool enable) +{ + u32 reg; + + + reg = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_DOORBELL_APER_EN); + reg = REG_SET_FIELD(reg, RCC_DEV0_EPF0_0_RCC_DOORBELL_APER_EN, + BIF_DOORBELL_APER_EN, enable ?
1 : 0); + + WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_DOORBELL_APER_EN, reg); +} + +static void nbio_v7_11_enable_doorbell_selfring_aperture(struct amdgpu_device *adev, + bool enable) +{ + u32 tmp = 0; + + if (enable) { + tmp = REG_SET_FIELD(tmp, BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL, + DOORBELL_SELFRING_GPA_APER_EN, 1) | + REG_SET_FIELD(tmp, BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL, + DOORBELL_SELFRING_GPA_APER_MODE, 1) | + REG_SET_FIELD(tmp, BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL, + DOORBELL_SELFRING_GPA_APER_SIZE, 0); + + WREG32_SOC15(NBIO, 0, + regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW, + lower_32_bits(adev->doorbell.base)); + WREG32_SOC15(NBIO, 0, + regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH, + upper_32_bits(adev->doorbell.base)); + } + + WREG32_SOC15(NBIO, 0, regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL, tmp); +} + + +static void nbio_v7_11_ih_doorbell_range(struct amdgpu_device *adev, + bool use_doorbell, int doorbell_index) +{ + u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0,regGDC0_BIF_IH_DOORBELL_RANGE); + + if (use_doorbell) { + ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, + GDC0_BIF_IH_DOORBELL_RANGE, OFFSET, + doorbell_index); + ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, + GDC0_BIF_IH_DOORBELL_RANGE, SIZE, + 2); + } else { + ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, + GDC0_BIF_IH_DOORBELL_RANGE, SIZE, + 0); + } + + WREG32_SOC15(NBIO, 0, regGDC0_BIF_IH_DOORBELL_RANGE, + ih_doorbell_range); +} + +static void nbio_v7_11_ih_control(struct amdgpu_device *adev) +{ + u32 interrupt_cntl; + + /* setup interrupt control */ + WREG32_SOC15(NBIO, 0, regBIF_BX1_INTERRUPT_CNTL2, + adev->dummy_page_addr >> 8); + + interrupt_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX1_INTERRUPT_CNTL); + /* + * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi + * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN + */ + interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX1_INTERRUPT_CNTL, + IH_DUMMY_RD_OVERRIDE, 0); + + /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */ + interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX1_INTERRUPT_CNTL, + IH_REQ_NONSNOOP_EN, 0); + + WREG32_SOC15(NBIO, 0, regBIF_BX1_INTERRUPT_CNTL, interrupt_cntl); +} + +static u32 nbio_v7_11_get_hdp_flush_req_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF1_GPU_HDP_FLUSH_REQ); +} + +static u32 nbio_v7_11_get_hdp_flush_done_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF1_GPU_HDP_FLUSH_DONE); +} + +static u32 nbio_v7_11_get_pcie_index_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX1_PCIE_INDEX2); +} + +static u32 nbio_v7_11_get_pcie_data_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX1_PCIE_DATA2); +} + +static u32 nbio_v7_11_get_pcie_port_index_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF1_RSMU_INDEX); +} + +static u32 nbio_v7_11_get_pcie_port_data_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF1_RSMU_DATA); +} + +const struct nbio_hdp_flush_reg nbio_v7_11_hdp_flush_reg = { + .ref_and_mask_cp0 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP0_MASK, + .ref_and_mask_cp1 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP1_MASK, + .ref_and_mask_cp2 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP2_MASK, + .ref_and_mask_cp3 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP3_MASK, + .ref_and_mask_cp4 
= BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP4_MASK, + .ref_and_mask_cp5 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP5_MASK, + .ref_and_mask_cp6 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP6_MASK, + .ref_and_mask_cp7 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP7_MASK, + .ref_and_mask_cp8 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP8_MASK, + .ref_and_mask_cp9 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP9_MASK, + .ref_and_mask_sdma0 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__SDMA0_MASK, + .ref_and_mask_sdma1 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__SDMA1_MASK, +}; + +static void nbio_v7_11_init_registers(struct amdgpu_device *adev) +{ + uint32_t def, data; + + def = data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3); + data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, + CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1); + data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, + CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1); + + if (def != data) + WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, data); + +} + +static void nbio_v7_11_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data; + + if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) + return; + + def = data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL); + if (enable) { + data |= (BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK | + BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK | + BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK | + BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK | + BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK | + BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK); + } else { + data &= ~(BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK | + BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK | + BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK | + BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK | + BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK | + BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK); + } + + if (def != data) + WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL, data); +} + +static void nbio_v7_11_update_medium_grain_light_sleep(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data; + + if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) + return; + + def = data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2); + if (enable) + data |= BIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2__SLV_MEM_LS_EN_MASK; + else + data &= ~BIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2__SLV_MEM_LS_EN_MASK; + + if (def != data) + WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2, data); + + def = data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1); + if (enable) { + data |= (BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK | + BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK); + } else { + data &= ~(BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK | + BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK); + } + + if (def != data) + WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1, data); +} + +static void nbio_v7_11_get_clockgating_state(struct amdgpu_device *adev, + u64 *flags) +{ + uint32_t data; + + /* AMD_CG_SUPPORT_BIF_MGCG */ + data = 
+
+static void nbio_v7_11_get_clockgating_state(struct amdgpu_device *adev,
+					     u64 *flags)
+{
+	uint32_t data;
+
+	/* AMD_CG_SUPPORT_BIF_MGCG */
+	data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL);
+	if (data & BIF_BIF256_CI256_RC3X4_USB4_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
+		*flags |= AMD_CG_SUPPORT_BIF_MGCG;
+
+	/* AMD_CG_SUPPORT_BIF_LS */
+	data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2);
+	if (data & BIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
+		*flags |= AMD_CG_SUPPORT_BIF_LS;
+}
+
+const struct amdgpu_nbio_funcs nbio_v7_11_funcs = {
+	.get_hdp_flush_req_offset = nbio_v7_11_get_hdp_flush_req_offset,
+	.get_hdp_flush_done_offset = nbio_v7_11_get_hdp_flush_done_offset,
+	.get_pcie_index_offset = nbio_v7_11_get_pcie_index_offset,
+	.get_pcie_data_offset = nbio_v7_11_get_pcie_data_offset,
+	.get_pcie_port_index_offset = nbio_v7_11_get_pcie_port_index_offset,
+	.get_pcie_port_data_offset = nbio_v7_11_get_pcie_port_data_offset,
+	.get_rev_id = nbio_v7_11_get_rev_id,
+	.mc_access_enable = nbio_v7_11_mc_access_enable,
+	.get_memsize = nbio_v7_11_get_memsize,
+	.sdma_doorbell_range = nbio_v7_11_sdma_doorbell_range,
+	.vcn_doorbell_range = nbio_v7_11_vcn_doorbell_range,
+	.vpe_doorbell_range = nbio_v7_11_vpe_doorbell_range,
+	.enable_doorbell_aperture = nbio_v7_11_enable_doorbell_aperture,
+	.enable_doorbell_selfring_aperture = nbio_v7_11_enable_doorbell_selfring_aperture,
+	.ih_doorbell_range = nbio_v7_11_ih_doorbell_range,
+	.update_medium_grain_clock_gating = nbio_v7_11_update_medium_grain_clock_gating,
+	.update_medium_grain_light_sleep = nbio_v7_11_update_medium_grain_light_sleep,
+	.get_clockgating_state = nbio_v7_11_get_clockgating_state,
+	.ih_control = nbio_v7_11_ih_control,
+	.init_registers = nbio_v7_11_init_registers,
+	.remap_hdp_registers = nbio_v7_11_remap_hdp_registers,
+};
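
nbio_v7_11_funcs is one more instance of the driver's usual hardware-abstraction shape: a const table of function pointers that common code reaches through adev->nbio.funcs, so it never names an IP-specific symbol. A reduced sketch of that dispatch (the types and names here are illustrative, not the actual amdgpu_nbio_funcs layout):

#include <stdint.h>
#include <stdio.h>

struct nbio_funcs {
	uint32_t (*get_memsize)(void);
	void (*ih_control)(void);
};

static uint32_t v7_11_get_memsize(void) { return 4096; }
static void v7_11_ih_control(void) { puts("v7_11 ih_control"); }

/* each IP version provides its own const table */
static const struct nbio_funcs nbio_v7_11 = {
	.get_memsize = v7_11_get_memsize,
	.ih_control = v7_11_ih_control,
};

int main(void)
{
	/* common code holds only the pointer, picked at probe time */
	const struct nbio_funcs *funcs = &nbio_v7_11;

	funcs->ih_control();
	printf("memsize = %u\n", funcs->get_memsize());
	return 0;
}
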
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.h
new file mode 100644
index 000000000..9d8258ed3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NBIO_V7_11_H__
+#define __NBIO_V7_11_H__
+
+#include "soc15_common.h"
+
+extern const struct nbio_hdp_flush_reg nbio_v7_11_hdp_flush_reg;
+extern const struct amdgpu_nbio_funcs nbio_v7_11_funcs;
+extern const struct amdgpu_nbio_ras_funcs nbio_v7_11_ras_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
index 4ef1fa460..e962821ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
@@ -59,7 +59,7 @@ static u32 nbio_v7_2_get_rev_id(struct amdgpu_device *adev)
 {
 	u32 tmp;
 
-	switch (adev->ip_versions[NBIO_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
 	case IP_VERSION(7, 2, 1):
 	case IP_VERSION(7, 3, 0):
 	case IP_VERSION(7, 5, 0):
@@ -78,7 +78,7 @@ static u32 nbio_v7_2_get_rev_id(struct amdgpu_device *adev)
 
 static void nbio_v7_2_mc_access_enable(struct amdgpu_device *adev, bool enable)
 {
-	switch (adev->ip_versions[NBIO_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
 	case IP_VERSION(7, 2, 1):
 	case IP_VERSION(7, 3, 0):
 	case IP_VERSION(7, 5, 0):
@@ -262,7 +262,7 @@ static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev
 {
 	uint32_t def, data;
 
-	switch (adev->ip_versions[NBIO_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
 	case IP_VERSION(7, 2, 1):
 	case IP_VERSION(7, 3, 0):
 	case IP_VERSION(7, 5, 0):
@@ -369,7 +369,7 @@ const struct nbio_hdp_flush_reg nbio_v7_2_hdp_flush_reg = {
 static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
 {
 	uint32_t def, data;
-	switch (adev->ip_versions[NBIO_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
 	case IP_VERSION(7, 2, 1):
 	case IP_VERSION(7, 3, 0):
 	case IP_VERSION(7, 5, 0):
@@ -394,7 +394,7 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
 		break;
 	}
 
-	switch (adev->ip_versions[NBIO_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
 	case IP_VERSION(7, 3, 0):
 	case IP_VERSION(7, 5, 1):
 		data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
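
The recurring mechanical change in this file -- adev->ip_versions[NBIO_HWIP][0] becoming amdgpu_ip_version(adev, NBIO_HWIP, 0) -- swaps open-coded array indexing for an accessor, keeping the representation of discovered IP versions private to one helper. The accessor's body is not part of this patch; a plausible reading, purely an assumption for illustration, is a thin inline over the same table:

#include <stdint.h>
#include <stdio.h>

#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))
#define NBIO_HWIP 0
#define HWIP_MAX  1
#define INST_MAX  1

struct demo_device {
	uint32_t ip_versions[HWIP_MAX][INST_MAX];
};

/* sketch of an accessor: call sites stop depending on the table layout */
static inline uint32_t demo_ip_version(struct demo_device *adev,
				       int hwip, int inst)
{
	return adev->ip_versions[hwip][inst];
}

int main(void)
{
	struct demo_device dev = {
		.ip_versions = { { IP_VERSION(7, 11, 0) } },
	};

	if (demo_ip_version(&dev, NBIO_HWIP, 0) == IP_VERSION(7, 11, 0))
		puts("NBIO 7.11.0");
	return 0;
}
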
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 685abf57f..6d24c8492 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -347,7 +347,7 @@ static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
 		adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
 			mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
 
-	if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4) &&
+	if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(7, 4, 4) &&
 	    !amdgpu_sriov_vf(adev)) {
 		baco_cntl = RREG32_SOC15(NBIO, 0, mmBACO_CNTL);
 		if (baco_cntl &
@@ -365,9 +365,12 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
 {
 	uint32_t bif_doorbell_intr_cntl;
 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
-	struct ras_err_data err_data = {0, 0, 0, NULL};
+	struct ras_err_data err_data;
 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
+	if (amdgpu_ras_error_data_init(&err_data))
+		return;
+
 	if (adev->asic_type == CHIP_ALDEBARAN)
 		bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE);
 	else
@@ -418,6 +421,8 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
 		 */
 		amdgpu_ras_reset_gpu(adev);
 	}
+
+	amdgpu_ras_error_data_fini(&err_data);
 }
 
 static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
@@ -702,7 +707,7 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
 #ifdef CONFIG_PCIEASPM
 	uint32_t def, data;
 
-	if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4))
+	if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(7, 4, 4))
 		return;
 
 	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
index def89379b..4df1055e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
@@ -254,7 +254,7 @@ static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *ade
 {
 	uint32_t def, data;
 
-	if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
+	if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
 		return;
 
 	def = data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
@@ -283,7 +283,7 @@ static void nbio_v7_7_update_medium_grain_light_sleep(struct amdgpu_device *adev
 {
 	uint32_t def, data;
 
-	if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
+	if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
 		return;
 
 	def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index ae45656eb..db013f861 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -56,8 +56,15 @@ static u32 nbio_v7_9_get_rev_id(struct amdgpu_device *adev)
 {
 	u32 tmp;
 
+	tmp = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0));
+	/* If it is VF or subrevision holds a non-zero value, that should be used */
+	if (tmp || amdgpu_sriov_vf(adev))
+		return tmp;
+
+	/* If discovery subrev is not updated, use register version */
 	tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
-	tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0, STRAP_ATI_REV_ID_DEV0_F0);
+	tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0,
+			    STRAP_ATI_REV_ID_DEV0_F0);
 
 	return tmp;
 }
@@ -173,8 +180,6 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan
 	default:
 		break;
 	}
-
-	return;
 }
 
 static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
@@ -426,6 +431,12 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
 	u32 inst_mask;
 	int i;
 
+	if (amdgpu_sriov_vf(adev))
+		adev->rmmio_remap.reg_offset =
+			SOC15_REG_OFFSET(
+				NBIO, 0,
+				regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL)
+			<< 2;
 	WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE,
 		0xff & ~(adev->gfx.xcc_mask));
 
@@ -556,16 +567,18 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
 
 static void nbio_v7_9_query_ras_error_count(struct amdgpu_device *adev,
 					void *ras_error_status)
 {
-	return;
 }
 
 static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
 {
 	uint32_t bif_doorbell_intr_cntl;
 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
-	struct ras_err_data err_data = {0, 0, 0, NULL};
+	struct ras_err_data err_data;
 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
+	if (amdgpu_ras_error_data_init(&err_data))
+		return;
+
 	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
 
 	if (REG_GET_FIELD(bif_doorbell_intr_cntl,
@@ -605,6 +618,8 @@ static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device
 		dev_info(adev->dev, "RAS controller interrupt triggered "
 					"by NBIF error\n");
 	}
+
+	amdgpu_ras_error_data_fini(&err_data);
 }
 
 static void nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
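
Both RAS interrupt handlers above replace a zero-initialized stack ras_err_data with an amdgpu_ras_error_data_init()/amdgpu_ras_error_data_fini() pair: the error-data object now owns dynamically managed state, so it must be set up before use and released on every path out of the function. The shape of that contract, as a self-contained sketch (demo types, not the real ras_err_data):

#include <stdio.h>
#include <stdlib.h>

struct demo_err_data {
	unsigned long *counts;	/* dynamically owned state */
};

static int demo_err_data_init(struct demo_err_data *d)
{
	d->counts = calloc(8, sizeof(*d->counts));
	return d->counts ? 0 : -1;
}

static void demo_err_data_fini(struct demo_err_data *d)
{
	free(d->counts);
	d->counts = NULL;
}

static void handle_irq(void)
{
	struct demo_err_data err_data;

	if (demo_err_data_init(&err_data))
		return;			/* bail out early if setup fails */

	err_data.counts[0]++;		/* ... query/record errors ... */

	demo_err_data_fini(&err_data);	/* must run on every exit path */
}

int main(void)
{
	handle_irq();
	puts("done");
	return 0;
}

This is why both handlers now return early when init fails and call fini just before returning.
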
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 13aca808e..4d7976b77 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -214,7 +214,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
 	if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
 		return -EINVAL;
 
-	switch (adev->ip_versions[UVD_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
 	case IP_VERSION(3, 0, 0):
 	case IP_VERSION(3, 0, 64):
 	case IP_VERSION(3, 0, 192):
@@ -453,7 +453,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
 		dev_warn(adev->dev,
 			 "Specified reset method:%d isn't supported, using AUTO instead.\n",
 			 amdgpu_reset_method);
-	switch (adev->ip_versions[MP1_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
 	case IP_VERSION(11, 5, 0):
 	case IP_VERSION(13, 0, 1):
 	case IP_VERSION(13, 0, 3):
@@ -513,11 +513,10 @@ static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
 
 static void nv_program_aspm(struct amdgpu_device *adev)
 {
-	if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
+	if (!amdgpu_device_should_use_aspm(adev))
 		return;
 
-	if (!(adev->flags & AMD_IS_APU) &&
-	    (adev->nbio.funcs->program_aspm))
+	if (adev->nbio.funcs->program_aspm)
 		adev->nbio.funcs->program_aspm(adev);
 }
 
@@ -609,9 +608,8 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
 	if (adev->gfx.funcs->update_perfmon_mgcg)
 		adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
 
-	if (!(adev->flags & AMD_IS_APU) &&
-	    (adev->nbio.funcs->enable_aspm) &&
-	    amdgpu_device_should_use_aspm(adev))
+	if (adev->nbio.funcs->enable_aspm &&
+	    amdgpu_device_should_use_aspm(adev))
 		adev->nbio.funcs->enable_aspm(adev, !enter);
 
 	return 0;
@@ -669,7 +667,7 @@ static int nv_common_early_init(void *handle)
 
 	/* TODO: split the GC and PG flags based on the relevant IP version for which
 	 * they are relevant.
 	 */
-	switch (adev->ip_versions[GC_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 	case IP_VERSION(10, 1, 10):
 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 			AMD_CG_SUPPORT_GFX_CGCG |
@@ -1073,7 +1071,7 @@ static int nv_common_set_clockgating_state(void *handle,
 	if (amdgpu_sriov_vf(adev))
 		return 0;
 
-	switch (adev->ip_versions[NBIO_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
 	case IP_VERSION(2, 3, 0):
 	case IP_VERSION(2, 3, 1):
 	case IP_VERSION(2, 3, 2):
@@ -1115,8 +1113,6 @@ static void nv_common_get_clockgating_state(void *handle, u64 *flags)
 	adev->hdp.funcs->get_clock_gating_state(adev, flags);
 	adev->smuio.funcs->get_clock_gating_state(adev, flags);
-
-	return;
 }
 
 static const struct amd_ip_funcs nv_common_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 18917df78..4bb5e1021 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -293,6 +293,10 @@ enum psp_gfx_fw_type {
 	GFX_FW_TYPE_RS64_MEC_P1_STACK = 95,     /* RS64 MEC stack P1 SOC21 */
 	GFX_FW_TYPE_RS64_MEC_P2_STACK = 96,     /* RS64 MEC stack P2 SOC21 */
 	GFX_FW_TYPE_RS64_MEC_P3_STACK = 97,     /* RS64 MEC stack P3 SOC21 */
+	GFX_FW_TYPE_VPEC_FW1 = 100,             /* VPEC FW1 To Save VPE */
+	GFX_FW_TYPE_VPEC_FW2 = 101,             /* VPEC FW2 To Save VPE */
+	GFX_FW_TYPE_VPE = 102,
+	GFX_FW_TYPE_P2S_TABLE = 129,
 	GFX_FW_TYPE_MAX
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 5f10883da..145186a1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -58,9 +58,10 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
 		return err;
 
 	err = psp_init_ta_microcode(psp, ucode_prefix);
-	if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 1, 0)) &&
-	    (adev->pdev->revision == 0xa1) &&
-	    (psp->securedisplay_context.context.bin_desc.fw_version >= 0x27000008)) {
+	if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0)) &&
+	    (adev->pdev->revision == 0xa1) &&
+	    (psp->securedisplay_context.context.bin_desc.fw_version >=
+	     0x27000008)) {
 		adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0;
 	}
 	return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 8f84fe40a..efa37e3b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -95,7 +95,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
 
 	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
 
-	switch (adev->ip_versions[MP0_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
 	case IP_VERSION(11, 0, 2):
 	case IP_VERSION(11, 0, 4):
 		err = psp_init_sos_microcode(psp, ucode_prefix);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index fe1995ed1..df1844d08 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -82,7 +82,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
 
 	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
 
-	switch (adev->ip_versions[MP0_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
 	case IP_VERSION(13, 0, 2):
 		err = psp_init_sos_microcode(psp, ucode_prefix);
 		if (err)
@@ -164,7 +164,7 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
 	int retry_loop, retry_cnt, ret;
 
 	retry_cnt =
-		(adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6)) ?
+		(amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) ?
 			PSP_VMBX_POLLING_LIMIT : 10;
 
 	/* Wait for bootloader to signify that it is ready having bit 31 of
@@ -188,7 +188,7 @@ static int psp_v13_0_wait_for_bootloader_steady_state(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;
 
-	if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6)) {
+	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) {
 		psp_v13_0_wait_for_vmbx_ready(psp);
 
 		return psp_v13_0_wait_for_bootloader(psp);
@@ -267,6 +267,12 @@ static int psp_v13_0_bootloader_load_ras_drv(struct psp_context *psp)
 	return psp_v13_0_bootloader_load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV);
 }
 
+static inline void psp_v13_0_init_sos_version(struct psp_context *psp)
+{
+	struct amdgpu_device *adev = psp->adev;
+
+	psp->sos.fw_version = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_58);
+}
 
 static int psp_v13_0_bootloader_load_sos(struct psp_context *psp)
 {
@@ -277,8 +283,10 @@ static int psp_v13_0_bootloader_load_sos(struct psp_context *psp)
 	/* Check sOS sign of life register to confirm sys driver and sOS
 	 * are already been loaded.
 	 */
-	if (psp_v13_0_is_sos_alive(psp))
+	if (psp_v13_0_is_sos_alive(psp)) {
+		psp_v13_0_init_sos_version(psp);
 		return 0;
+	}
 
 	ret = psp_v13_0_wait_for_bootloader(psp);
 	if (ret)
@@ -302,6 +310,9 @@ static int psp_v13_0_bootloader_load_sos(struct psp_context *psp)
 			   RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81),
 			   0, true);
 
+	if (!ret)
+		psp_v13_0_init_sos_version(psp);
+
 	return ret;
 }
 
@@ -735,7 +746,7 @@ static int psp_v13_0_fatal_error_recovery_quirk(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;
 
-	if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 10)) {
+	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 10)) {
 		uint32_t reg_data;
 		/* MP1 fatal error: trigger PSP dram read to unhalt PSP
 		 * during MP1 triggered sync flood.
@@ -752,6 +763,83 @@ static int psp_v13_0_fatal_error_recovery_quirk(struct psp_context *psp)
 	return 0;
 }
 
+static void psp_v13_0_boot_error_reporting(struct amdgpu_device *adev,
+					   uint32_t inst,
+					   uint32_t boot_error)
+{
+	uint32_t socket_id;
+	uint32_t aid_id;
+	uint32_t hbm_id;
+	uint32_t reg_data;
+
+	socket_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, SOCKET_ID);
+	aid_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, AID_ID);
+	hbm_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, HBM_ID);
+
+	reg_data = RREG32_SOC15(MP0, inst, regMP0_SMN_C2PMSG_109);
+	dev_info(adev->dev, "socket: %d, aid: %d, firmware boot failed, fw status is 0x%x\n",
+		 socket_id, aid_id, reg_data);
+
+	if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_MEM_TRAINING))
+		dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, memory training failed\n",
+			 socket_id, aid_id, hbm_id);
+
+	if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_FW_LOAD))
+		dev_info(adev->dev, "socket: %d, aid: %d, firmware load failed at boot time\n",
+			 socket_id, aid_id);
+
+	if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_WAFL_LINK_TRAINING))
+		dev_info(adev->dev, "socket: %d, aid: %d, wafl link training failed\n",
+			 socket_id, aid_id);
+
+	if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_XGMI_LINK_TRAINING))
+		dev_info(adev->dev, "socket: %d, aid: %d, xgmi link training failed\n",
+			 socket_id, aid_id);
+
+	if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_CP_LINK_TRAINING))
+		dev_info(adev->dev, "socket: %d, aid: %d, usr cp link training failed\n",
+			 socket_id, aid_id);
+
+	if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_DP_LINK_TRAINING))
+		dev_info(adev->dev, "socket: %d, aid: %d, usr dp link training failed\n",
+			 socket_id, aid_id);
+
+	if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_MEM_TEST))
+		dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm memory test failed\n",
+			 socket_id, aid_id, hbm_id);
+
+	if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_BIST_TEST))
+		dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm bist test failed\n",
+			 socket_id, aid_id, hbm_id);
+}
+
+static int psp_v13_0_query_boot_status(struct psp_context *psp)
+{
+	struct amdgpu_device *adev = psp->adev;
+	int inst_mask = adev->aid_mask;
+	uint32_t reg_data;
+	uint32_t i;
+	int ret = 0;
+
+	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6))
+		return 0;
+
+	if (RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_59) < 0x00a10109)
+		return 0;
+
+	for_each_inst(i, inst_mask) {
+		reg_data = RREG32_SOC15(MP0, i, regMP0_SMN_C2PMSG_126);
+		if (!REG_GET_FIELD(reg_data, MP0_SMN_C2PMSG_126, BOOT_STATUS)) {
+			psp_v13_0_boot_error_reporting(adev, i, reg_data);
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	return ret;
+}
+
 static const struct psp_funcs psp_v13_0_funcs = {
 	.init_microcode = psp_v13_0_init_microcode,
 	.wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state,
@@ -774,6 +862,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
 	.update_spirom = psp_v13_0_update_spirom,
 	.vbflash_stat = psp_v13_0_vbflash_status,
 	.fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk,
+	.query_boot_status = psp_v13_0_query_boot_status,
 };
 
 void psp_v13_0_set_psp_funcs(struct psp_context *psp)
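
psp_v13_0_query_boot_status() walks only the AIDs whose bit is set in adev->aid_mask via for_each_inst(). Assuming that macro is, like most kernel *_for_each helpers, a set-bit iterator over the mask -- an assumption, since its definition is not part of this patch -- the control flow is equivalent to:

#include <stdio.h>

int main(void)
{
	unsigned int inst_mask = 0x0b;	/* instances 0, 1 and 3 present */
	unsigned int i;

	/* iterate set bits only, like a for_each_set_bit()-style helper */
	for (i = 0; inst_mask >> i; i++) {
		if (!((inst_mask >> i) & 1))
			continue;
		printf("checking boot status of instance %u\n", i);
		/* read the per-instance status register here; on failure,
		 * report and break out, as the PSP code above does */
	}
	return 0;
}
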
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
index d5ba58eba..eaa5512a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
@@ -40,7 +40,7 @@ static int psp_v13_0_4_init_microcode(struct psp_context *psp)
 
 	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
 
-	switch (adev->ip_versions[MP0_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
 	case IP_VERSION(13, 0, 4):
 		err = psp_init_toc_microcode(psp, ucode_prefix);
 		if (err)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 51afc9299..8d5d86675 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -339,8 +339,6 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
@@ -474,9 +472,6 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
 		r = amdgpu_ring_test_helper(ring);
 		if (r)
 			return r;
-
-		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_buffer_funcs_status(adev, true);
 	}
 
 	return 0;
@@ -816,9 +811,14 @@ static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring,
 static int sdma_v2_4_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
 
 	adev->sdma.num_instances = SDMA_MAX_INSTANCE;
 
+	r = sdma_v2_4_init_microcode(adev);
+	if (r)
+		return r;
+
 	sdma_v2_4_set_ring_funcs(adev);
 	sdma_v2_4_set_buffer_funcs(adev);
 	sdma_v2_4_set_vm_pte_funcs(adev);
@@ -851,12 +851,6 @@ static int sdma_v2_4_sw_init(void *handle)
 	if (r)
 		return r;
 
-	r = sdma_v2_4_init_microcode(adev);
-	if (r) {
-		DRM_ERROR("Failed to load sdma firmware!\n");
-		return r;
-	}
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		ring = &adev->sdma.instance[i].ring;
 		ring->ring_obj = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 344202870..2ad615be4 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -513,8 +513,6 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
@@ -746,9 +744,6 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 		r = amdgpu_ring_test_helper(ring);
 		if (r)
 			return r;
-
-		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_buffer_funcs_status(adev, true);
 	}
 
 	return 0;
@@ -1087,6 +1082,7 @@ static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring,
 static int sdma_v3_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
 
 	switch (adev->asic_type) {
 	case CHIP_STONEY:
@@ -1097,6 +1093,10 @@ static int sdma_v3_0_early_init(void *handle)
 		break;
 	}
 
+	r = sdma_v3_0_init_microcode(adev);
+	if (r)
+		return r;
+
 	sdma_v3_0_set_ring_funcs(adev);
 	sdma_v3_0_set_buffer_funcs(adev);
 	sdma_v3_0_set_vm_pte_funcs(adev);
@@ -1129,12 +1129,6 @@ static int sdma_v3_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	r = sdma_v3_0_init_microcode(adev);
-	if (r) {
-		DRM_ERROR("Failed to load sdma firmware!\n");
-		return r;
-	}
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		ring = &adev->sdma.instance[i].ring;
 		ring->ring_obj = NULL;
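
A pattern repeated for every SDMA generation in this patch: the microcode request moves out of sw_init() into early_init(), so a missing or malformed firmware file fails the IP block before any ring or interrupt state is allocated, and the DRM_ERROR logging is dropped in favor of plain error propagation. Schematically (hypothetical names that mirror only the hook ordering, not any real amdgpu API):

#include <stdio.h>

static int demo_load_firmware(void) { return 0; /* or a -ENOENT-style code */ }

static int demo_early_init(void)
{
	int r;

	/* fail fast: nothing has been allocated yet at this point */
	r = demo_load_firmware();
	if (r)
		return r;
	return 0;
}

static int demo_sw_init(void)
{
	/* firmware is already known-good; only build rings etc. here */
	return 0;
}

int main(void)
{
	if (demo_early_init() || demo_sw_init())
		puts("init failed");
	else
		puts("init ok");
	return 0;
}
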
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index cd37f45e0..3d68dd552 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -469,7 +469,7 @@ static int sdma_v4_0_irq_id_to_seq(unsigned client_id)
 
 static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
 {
-	switch (adev->ip_versions[SDMA0_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
 	case IP_VERSION(4, 0, 0):
 		soc15_program_register_sequence(adev,
 						golden_settings_sdma_4,
@@ -539,7 +539,7 @@ static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)
 	 * The only chips with SDMAv4 and ULV are VG10 and VG20.
 	 * Server SKUs take a different hysteresis setting from other SKUs.
 	 */
-	switch (adev->ip_versions[SDMA0_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
 	case IP_VERSION(4, 0, 0):
 		if (adev->pdev->device == 0x6860)
 			break;
@@ -578,8 +578,10 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
 	int ret, i;
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
-		    adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) {
+		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+			    IP_VERSION(4, 2, 2) ||
+		    amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+			    IP_VERSION(4, 4, 0)) {
 			/* Acturus & Aldebaran will leverage the same FW memory
 			   for every SDMA instance */
 			ret = amdgpu_sdma_init_microcode(adev, 0, true);
@@ -875,8 +877,6 @@ static void sdma_v4_0_gfx_enable(struct amdgpu_device *adev, bool enable)
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, enable ? 1 : 0);
@@ -911,8 +911,6 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
@@ -978,7 +976,8 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 		 * Arcturus for the moment and firmware version 14
 		 * and above.
 		 */
-		if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) &&
+		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+			    IP_VERSION(4, 2, 2) &&
 		    adev->sdma.instance[i].fw_version >= 14)
 			WREG32_SDMA(i, mmSDMA0_PUB_DUMMY_REG2, enable);
 		/* Extend page fault timeout to avoid interrupt storm */
@@ -1255,7 +1254,7 @@ static void sdma_v4_0_init_pg(struct amdgpu_device *adev)
 	if (!(adev->pg_flags & AMD_PG_SUPPORT_SDMA))
 		return;
 
-	switch (adev->ip_versions[SDMA0_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
 	case IP_VERSION(4, 1, 0):
 	case IP_VERSION(4, 1, 1):
 	case IP_VERSION(4, 1, 2):
@@ -1399,13 +1398,7 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
 			r = amdgpu_ring_test_helper(page);
 			if (r)
 				return r;
-
-			if (adev->mman.buffer_funcs_ring == page)
-				amdgpu_ttm_set_buffer_funcs_status(adev, true);
 		}
-
-		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_buffer_funcs_status(adev, true);
 	}
 
 	return r;
@@ -1698,7 +1691,7 @@ static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
 {
 	uint fw_version = adev->sdma.instance[0].fw_version;
 
-	switch (adev->ip_versions[SDMA0_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
 	case IP_VERSION(4, 0, 0):
 		return fw_version >= 430;
 	case IP_VERSION(4, 0, 1):
@@ -1717,13 +1710,11 @@ static int sdma_v4_0_early_init(void *handle)
 	int r;
 
 	r = sdma_v4_0_init_microcode(adev);
-	if (r) {
-		DRM_ERROR("Failed to load sdma firmware!\n");
+	if (r)
 		return r;
-	}
 
 	/* TODO: Page queue breaks driver reload under SRIOV */
-	if ((adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 0, 0)) &&
+	if ((amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 0, 0)) &&
 	    amdgpu_sriov_vf((adev)))
 		adev->sdma.has_page_queue = false;
 	else if (sdma_v4_0_fw_support_paging_queue(adev))
@@ -1748,11 +1739,8 @@ static int sdma_v4_0_late_init(void *handle)
 
 	sdma_v4_0_setup_ulv(adev);
 
-	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
-		if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
-		    adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count)
-			adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev);
-	}
+	if (!amdgpu_persistent_edc_harvesting_supported(adev))
+		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__SDMA);
 
 	return 0;
 }
@@ -1823,7 +1811,9 @@ static int sdma_v4_0_sw_init(void *handle)
 		 * On Arcturus, SDMA instance 5~7 has a different vmhub
 		 * type(AMDGPU_MMHUB1).
 		 */
-		if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
+		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+			    IP_VERSION(4, 2, 2) &&
+		    i >= 5)
 			ring->vm_hub = AMDGPU_MMHUB1(0);
 		else
 			ring->vm_hub = AMDGPU_MMHUB0(0);
@@ -1843,8 +1833,10 @@ static int sdma_v4_0_sw_init(void *handle)
 			/* paging queue use same doorbell index/routing as gfx queue
 			 * with 0x400 (4096 dwords) offset on second doorbell page
 			 */
-			if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(4, 0, 0) &&
-			    adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(4, 2, 0)) {
+			if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
+				    IP_VERSION(4, 0, 0) &&
+			    amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
+				    IP_VERSION(4, 2, 0)) {
 				ring->doorbell_index =
 					adev->doorbell_index.sdma_engine[i] << 1;
 				ring->doorbell_index += 0x400;
@@ -1856,7 +1848,9 @@ static int sdma_v4_0_sw_init(void *handle)
 					(adev->doorbell_index.sdma_engine[i] + 1) << 1;
 			}
 
-			if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
+			if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+				    IP_VERSION(4, 2, 2) &&
+			    i >= 5)
 				ring->vm_hub = AMDGPU_MMHUB1(0);
 			else
 				ring->vm_hub = AMDGPU_MMHUB0(0);
@@ -1890,8 +1884,8 @@ static int sdma_v4_0_sw_fini(void *handle)
 			amdgpu_ring_fini(&adev->sdma.instance[i].page);
 	}
 
-	if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
-	    adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0))
+	if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 2, 2) ||
+	    amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 0))
 		amdgpu_sdma_destroy_inst_ctx(adev, true);
 	else
 		amdgpu_sdma_destroy_inst_ctx(adev, false);
@@ -1917,11 +1911,8 @@ static int sdma_v4_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int i;
 
-	if (amdgpu_sriov_vf(adev)) {
-		/* disable the scheduler for SDMA */
-		amdgpu_sdma_unset_buffer_funcs_helper(adev);
+	if (amdgpu_sriov_vf(adev))
 		return 0;
-	}
 
 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
 		for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1960,7 +1951,6 @@ static int sdma_v4_0_resume(void *handle)
 	if (adev->in_s0ix) {
 		sdma_v4_0_enable(adev, true);
 		sdma_v4_0_gfx_enable(adev, true);
-		amdgpu_ttm_set_buffer_funcs_status(adev, true);
 		return 0;
 	}
 
@@ -2036,14 +2026,16 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
 		amdgpu_fence_process(&adev->sdma.instance[instance].ring);
 		break;
 	case 1:
-		if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0))
+		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+		    IP_VERSION(4, 2, 0))
 			amdgpu_fence_process(&adev->sdma.instance[instance].page);
 		break;
 	case 2:
 		/* XXX compute */
 		break;
 	case 3:
-		if (adev->ip_versions[SDMA0_HWIP][0] != IP_VERSION(4, 2, 0))
+		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) !=
+		    IP_VERSION(4, 2, 0))
 			amdgpu_fence_process(&adev->sdma.instance[instance].page);
 		break;
 	}
@@ -2259,7 +2251,7 @@ static int sdma_v4_0_set_powergating_state(void *handle,
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	switch (adev->ip_versions[SDMA0_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
 	case IP_VERSION(4, 1, 0):
 	case IP_VERSION(4, 1, 1):
 	case IP_VERSION(4, 1, 2):
@@ -2622,7 +2614,7 @@ static struct amdgpu_sdma_ras sdma_v4_0_ras = {
 
 static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
 {
-	switch (adev->ip_versions[SDMA0_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
 	case IP_VERSION(4, 2, 0):
 	case IP_VERSION(4, 2, 2):
 		adev->sdma.ras = &sdma_v4_0_ras;
@@ -2633,7 +2625,6 @@ static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
 	default:
 		break;
 	}
-
 }
 
 const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index f413898dd..0f24af6f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -132,7 +132,8 @@ static int sdma_v4_4_2_init_microcode(struct amdgpu_device *adev)
 	int ret, i;
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2)) {
+		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+		    IP_VERSION(4, 4, 2)) {
 			ret = amdgpu_sdma_init_microcode(adev, 0, true);
 			break;
 		} else {
@@ -154,13 +155,13 @@ static int sdma_v4_4_2_init_microcode(struct amdgpu_device *adev)
  */
 static uint64_t sdma_v4_4_2_ring_get_rptr(struct amdgpu_ring *ring)
 {
-	u64 *rptr;
+	u64 rptr;
 
 	/* XXX check if swapping is necessary on BE */
-	rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);
+	rptr = READ_ONCE(*((u64 *)&ring->adev->wb.wb[ring->rptr_offs]));
 
-	DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
-	return ((*rptr) >> 2);
+	DRM_DEBUG("rptr before shift == 0x%016llx\n", rptr);
+	return rptr >> 2;
 }
 
 /**
@@ -426,6 +427,7 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
 				      uint32_t inst_mask)
 {
 	struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
+	u32 doorbell_offset, doorbell;
 	u32 rb_cntl, ib_cntl;
 	int i, unset = 0;
 
@@ -443,6 +445,18 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
 		ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0);
 		WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
+
+		if (sdma[i]->use_doorbell) {
+			doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
+			doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET);
+
+			doorbell = REG_SET_FIELD(doorbell, SDMA_GFX_DOORBELL, ENABLE, 0);
+			doorbell_offset = REG_SET_FIELD(doorbell_offset,
+							SDMA_GFX_DOORBELL_OFFSET,
+							OFFSET, 0);
+			WREG32_SDMA(i, regSDMA_GFX_DOORBELL, doorbell);
+			WREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET, doorbell_offset);
+		}
 	}
 }
 
@@ -630,12 +644,6 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
 	rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
 	WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
 
-	/* Initialize the ring buffer's read and write pointers */
-	WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
-	WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
-	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
-	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);
-
 	/* set the wb address whether it's enabled or not */
 	WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_HI,
 	       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
@@ -653,6 +661,12 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
 	/* before programing wptr to a less value, need set minor_ptr_update first */
 	WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1);
 
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
+	WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
+	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
+	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);
+
 	doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
 	doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET);
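
The new sdma_v4_4_2_ring_get_rptr() above snapshots the write-back slot once with READ_ONCE() instead of keeping a pointer and dereferencing it twice (once for the debug print, once for the return), which could yield two different values for memory the GPU updates concurrently. READ_ONCE() is the kernel's helper; outside the kernel the same single-access guarantee is conventionally spelled with a volatile read, as in this sketch:

#include <stdint.h>
#include <stdio.h>

/* userspace stand-in for the kernel's READ_ONCE() on a 64-bit slot */
#define DEMO_READ_ONCE(x) (*(const volatile uint64_t *)&(x))

static uint64_t wb_slot;	/* imagine the device updates this */

static uint64_t get_rptr(void)
{
	/* one access, one value: the print and the return can't disagree */
	uint64_t rptr = DEMO_READ_ONCE(wb_slot);

	printf("rptr before shift == 0x%016llx\n",
	       (unsigned long long)rptr);
	return rptr >> 2;
}

int main(void)
{
	wb_slot = 0x40;
	printf("rptr = 0x%llx\n", (unsigned long long)get_rptr());
	return 0;
}
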
@@ -1231,7 +1245,7 @@ static void sdma_v4_4_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t re
 
 static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
 {
-	switch (adev->ip_versions[SDMA0_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
 	case IP_VERSION(4, 4, 2):
 		return false;
 	default:
@@ -1245,10 +1259,8 @@ static int sdma_v4_4_2_early_init(void *handle)
 	int r;
 
 	r = sdma_v4_4_2_init_microcode(adev);
-	if (r) {
-		DRM_ERROR("Failed to load sdma firmware!\n");
+	if (r)
 		return r;
-	}
 
 	/* TODO: Page queue breaks driver reload under SRIOV */
 	if (sdma_v4_4_2_fw_support_paging_queue(adev))
@@ -1277,11 +1289,8 @@ static int sdma_v4_4_2_late_init(void *handle)
 		.cb = sdma_v4_4_2_process_ras_data_cb,
 	};
 #endif
-	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
-		if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
-		    adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count)
-			adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev);
-	}
+	if (!amdgpu_persistent_edc_harvesting_supported(adev))
+		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__SDMA);
 
 	return 0;
 }
@@ -1401,7 +1410,7 @@ static int sdma_v4_4_2_sw_fini(void *handle)
 			amdgpu_ring_fini(&adev->sdma.instance[i].page);
 	}
 
-	if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2))
+	if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2))
 		amdgpu_sdma_destroy_inst_ctx(adev, true);
 	else
 		amdgpu_sdma_destroy_inst_ctx(adev, false);
@@ -2052,7 +2061,7 @@ const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block = {
 	.type = AMD_IP_BLOCK_TYPE_SDMA,
 	.major = 4,
 	.minor = 4,
-	.rev = 0,
+	.rev = 2,
 	.funcs = &sdma_v4_4_2_ip_funcs,
 };
 
@@ -2131,6 +2140,11 @@ static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev,
 {
 	struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;
 	uint32_t sdma_dev_inst = GET_INST(SDMA0, sdma_inst);
+	unsigned long ue_count = 0;
+	struct amdgpu_smuio_mcm_config_info mcm_info = {
+		.socket_id = adev->smuio.funcs->get_socket_id(adev),
+		.die_id = adev->sdma.instance[sdma_inst].aid_id,
+	};
 
 	/* sdma v4_4_2 doesn't support query ce counts */
 	amdgpu_ras_inst_query_ras_error_count(adev,
@@ -2140,7 +2154,9 @@ static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev,
 					ARRAY_SIZE(sdma_v4_4_2_ras_memory_list),
 					sdma_dev_inst,
 					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
-					&err_data->ue_count);
+					&ue_count);
+
+	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
 }
 
 static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 1cc34efb4..3c485e5a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -184,7 +184,7 @@ static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3
 
 static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
 {
-	switch (adev->ip_versions[SDMA0_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
 	case IP_VERSION(5, 0, 0):
 		soc15_program_register_sequence(adev,
 						golden_settings_sdma_5,
@@ -559,8 +559,6 @@ static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
@@ -825,9 +823,6 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
 		r = amdgpu_ring_test_helper(ring);
 		if (r)
 			return r;
-
-		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_buffer_funcs_status(adev, true);
 	}
 
 	return 0;
@@ -1336,6 +1331,11 @@ static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
 static int sdma_v5_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = sdma_v5_0_init_microcode(adev);
+	if (r)
+		return r;
 
 	sdma_v5_0_set_ring_funcs(adev);
 	sdma_v5_0_set_buffer_funcs(adev);
@@ -1367,12 +1367,6 @@ static int sdma_v5_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	r = sdma_v5_0_init_microcode(adev);
-	if (r) {
-		DRM_ERROR("Failed to load sdma firmware!\n");
-		return r;
-	}
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		ring = &adev->sdma.instance[i].ring;
 		ring->ring_obj = NULL;
@@ -1427,11 +1421,8 @@ static int sdma_v5_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (amdgpu_sriov_vf(adev)) {
-		/* disable the scheduler for SDMA */
-		amdgpu_sdma_unset_buffer_funcs_helper(adev);
+	if (amdgpu_sriov_vf(adev))
 		return 0;
-	}
 
 	sdma_v5_0_ctx_switch_enable(adev, false);
 	sdma_v5_0_enable(adev, false);
@@ -1697,7 +1688,7 @@ static int sdma_v5_0_set_clockgating_state(void *handle,
 	if (amdgpu_sriov_vf(adev))
 		return 0;
 
-	switch (adev->ip_versions[SDMA0_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
 	case IP_VERSION(5, 0, 0):
 	case IP_VERSION(5, 0, 2):
 	case IP_VERSION(5, 0, 5):
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 0f930fd8a..0058f3f7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -364,8 +364,6 @@ static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
@@ -625,9 +623,6 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
 		r = amdgpu_ring_test_helper(ring);
 		if (r)
 			return r;
-
-		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_buffer_funcs_status(adev, true);
 	}
 
 	return 0;
@@ -1172,6 +1167,11 @@ static void sdma_v5_2_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
 static int sdma_v5_2_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = amdgpu_sdma_init_microcode(adev, 0, true);
+	if (r)
+		return r;
 
 	sdma_v5_2_set_ring_funcs(adev);
 	sdma_v5_2_set_buffer_funcs(adev);
@@ -1231,12 +1231,6 @@ static int sdma_v5_2_sw_init(void *handle)
 		return r;
 	}
 
-	r = amdgpu_sdma_init_microcode(adev, 0, true);
-	if (r) {
-		DRM_ERROR("Failed to load sdma firmware!\n");
-		return r;
-	}
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		ring = &adev->sdma.instance[i].ring;
 		ring->ring_obj = NULL;
@@ -1285,11 +1279,8 @@ static int sdma_v5_2_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (amdgpu_sriov_vf(adev)) {
-		/* disable the scheduler for SDMA */
-		amdgpu_sdma_unset_buffer_funcs_helper(adev);
+	if (amdgpu_sriov_vf(adev))
 		return 0;
-	}
 
 	sdma_v5_2_ctx_switch_enable(adev, false);
 	sdma_v5_2_enable(adev, false);
@@ -1510,7 +1501,7 @@ static int sdma_v5_2_process_illegal_inst_irq(struct amdgpu_device *adev,
 
 static bool sdma_v5_2_firmware_mgcg_support(struct amdgpu_device *adev, int i)
 {
-	switch (adev->ip_versions[SDMA0_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
 	case IP_VERSION(5, 2, 1):
 		if (adev->sdma.instance[i].fw_version < 70)
 			return false;
@@ -1575,8 +1566,9 @@ static void sdma_v5_2_update_medium_grain_light_sleep(struct amdgpu_device *adev
 	int i;
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-
-		if (adev->sdma.instance[i].fw_version < 70 && adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 1))
+		if (adev->sdma.instance[i].fw_version < 70 &&
+		    amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+			    IP_VERSION(5, 2, 1))
 			adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_LS;
 
 		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
@@ -1605,7 +1597,7 @@ static int sdma_v5_2_set_clockgating_state(void *handle,
 	if (amdgpu_sriov_vf(adev))
 		return 0;
 
-	switch (adev->ip_versions[SDMA0_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
 	case IP_VERSION(5, 2, 0):
 	case IP_VERSION(5, 2, 2):
 	case IP_VERSION(5, 2, 1):
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 45be0af25..3c7ddd219 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -156,68 +156,35 @@ static uint64_t sdma_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
 static void sdma_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	uint32_t *wptr_saved;
-	uint32_t *is_queue_unmap;
-	uint64_t aggregated_db_index;
-	uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;
-
-	DRM_DEBUG("Setting write pointer\n");
-
-	if (ring->is_mes_queue) {
-		wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
-		is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
-					      sizeof(uint32_t));
-		aggregated_db_index =
-			amdgpu_mes_get_aggregated_doorbell_index(adev,
-							 ring->hw_prio);
 
+	if (ring->use_doorbell) {
+		DRM_DEBUG("Using doorbell -- "
+			  "wptr_offs == 0x%08x "
+			  "lower_32_bits(ring->wptr) << 2 == 0x%08x "
+			  "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
+			  ring->wptr_offs,
+			  lower_32_bits(ring->wptr << 2),
+			  upper_32_bits(ring->wptr << 2));
+		/* XXX check if swapping is necessary on BE */
 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
 			     ring->wptr << 2);
-		*wptr_saved = ring->wptr << 2;
-		if (*is_queue_unmap) {
-			WDOORBELL64(aggregated_db_index, ring->wptr << 2);
-			DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
-					ring->doorbell_index, ring->wptr << 2);
-			WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
-		} else {
-			DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
-					ring->doorbell_index, ring->wptr << 2);
-			WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
-
-			if (*is_queue_unmap)
-				WDOORBELL64(aggregated_db_index,
-					    ring->wptr << 2);
-		}
+		DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
+			  ring->doorbell_index, ring->wptr << 2);
+		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
 	} else {
-		if (ring->use_doorbell) {
-			DRM_DEBUG("Using doorbell -- "
-				  "wptr_offs == 0x%08x "
-				  "lower_32_bits(ring->wptr) << 2 == 0x%08x "
-				  "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
-				  ring->wptr_offs,
-				  lower_32_bits(ring->wptr << 2),
-				  upper_32_bits(ring->wptr << 2));
-			/* XXX check if swapping is necessary on BE */
-			atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
-				     ring->wptr << 2);
-			DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
-				  ring->doorbell_index, ring->wptr << 2);
-			WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
-		} else {
-			DRM_DEBUG("Not using doorbell -- "
-				  "regSDMA%i_GFX_RB_WPTR == 0x%08x "
-				  "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
-				  ring->me,
-				  lower_32_bits(ring->wptr << 2),
-				  ring->me,
-				  upper_32_bits(ring->wptr << 2));
-			WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev,
-				ring->me, regSDMA0_QUEUE0_RB_WPTR),
-				lower_32_bits(ring->wptr << 2));
-			WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev,
-				ring->me, regSDMA0_QUEUE0_RB_WPTR_HI),
-				upper_32_bits(ring->wptr << 2));
-		}
DRM_DEBUG("Not using doorbell -- " + "regSDMA%i_GFX_RB_WPTR == 0x%08x " + "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n", + ring->me, + lower_32_bits(ring->wptr << 2), + ring->me, + upper_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, + ring->me, regSDMA0_QUEUE0_RB_WPTR), + lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, + ring->me, regSDMA0_QUEUE0_RB_WPTR_HI), + upper_32_bits(ring->wptr << 2)); } } @@ -234,7 +201,7 @@ static void sdma_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) amdgpu_ring_write(ring, ring->funcs->nop); } -/** +/* * sdma_v6_0_ring_emit_ib - Schedule an IB on the DMA engine * * @ring: amdgpu ring pointer @@ -381,8 +348,6 @@ static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev) u32 rb_cntl, ib_cntl; int i; - amdgpu_sdma_unset_buffer_funcs_helper(adev); - for (i = 0; i < adev->sdma.num_instances; i++) { rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL)); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 0); @@ -594,9 +559,6 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) r = amdgpu_ring_test_helper(ring); if (r) return r; - - if (adev->mman.buffer_funcs_ring == ring) - amdgpu_ttm_set_buffer_funcs_status(adev, true); } return 0; @@ -937,7 +899,7 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring) return r; } -/** +/* * sdma_v6_0_ring_test_ib - test an IB on the DMA engine * * @ring: amdgpu_ring structure holding ring information @@ -1119,7 +1081,7 @@ static void sdma_v6_0_vm_set_pte_pde(struct amdgpu_ib *ib, ib->ptr[ib->length_dw++] = count - 1; /* number of entries */ } -/** +/* * sdma_v6_0_ring_pad_ib - pad the IB * @ib: indirect buffer to fill with padding * @ring: amdgpu ring pointer @@ -1168,7 +1130,7 @@ static void sdma_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */ } -/** +/* * sdma_v6_0_ring_emit_vm_flush - vm flush using sDMA * * @ring: amdgpu_ring pointer @@ -1246,19 +1208,23 @@ static struct amdgpu_sdma_ras sdma_v6_0_3_ras = { static void sdma_v6_0_set_ras_funcs(struct amdgpu_device *adev) { - switch (adev->ip_versions[SDMA0_HWIP][0]) { + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { case IP_VERSION(6, 0, 3): adev->sdma.ras = &sdma_v6_0_3_ras; break; default: break; } - } static int sdma_v6_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_sdma_init_microcode(adev, 0, true); + if (r) + return r; sdma_v6_0_set_ring_funcs(adev); sdma_v6_0_set_buffer_funcs(adev); @@ -1283,12 +1249,6 @@ static int sdma_v6_0_sw_init(void *handle) if (r) return r; - r = amdgpu_sdma_init_microcode(adev, 0, true); - if (r) { - DRM_ERROR("Failed to load sdma firmware!\n"); - return r; - } - for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; ring->ring_obj = NULL; @@ -1343,11 +1303,8 @@ static int sdma_v6_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (amdgpu_sriov_vf(adev)) { - /* disable the scheduler for SDMA */ - amdgpu_sdma_unset_buffer_funcs_helper(adev); + if (amdgpu_sriov_vf(adev)) return 0; - } sdma_v6_0_ctxempty_int_enable(adev, false); sdma_v6_0_enable(adev, false); diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 4b81f29e5..a75752615 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -2440,8 +2440,6 @@ static void 
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 4b81f29e5..a75752615 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -2440,8 +2440,6 @@ static void si_program_aspm(struct amdgpu_device *adev)
 	if (!amdgpu_device_should_use_aspm(adev))
 		return;
 
-	if (adev->flags & AMD_IS_APU)
-		return;
 	orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
 	data &= ~LC_XMIT_N_FTS_MASK;
 	data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 42c4547f3..9aa0e11ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -115,8 +115,6 @@ static void si_dma_stop(struct amdgpu_device *adev)
 	u32 rb_cntl;
 	unsigned i;
 
-	amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		/* dma0 */
 		rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
@@ -177,9 +175,6 @@ static int si_dma_start(struct amdgpu_device *adev)
 		r = amdgpu_ring_test_helper(ring);
 		if (r)
 			return r;
-
-		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_buffer_funcs_status(adev, true);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 9a24f17a5..cada9f300 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -119,6 +119,12 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
 		tmp = RREG32(IH_RB_CNTL);
 		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
 		WREG32(IH_RB_CNTL, tmp);
+
+		/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+		 * can be detected.
+		 */
+		tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+		WREG32(IH_RB_CNTL, tmp);
 	}
 	return (wptr & ih->ptr_mask);
 }
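
The si_ih change above is a classic acknowledge-then-rearm fix: WPTR_OVERFLOW_CLEAR is set to acknowledge the overflow, then cleared again in the very next write, because leaving the bit asserted would mask any subsequent overflow. In miniature (demo register and mask, not the real IH block):

#include <stdint.h>
#include <stdio.h>

#define OVERFLOW_CLEAR_MASK 0x80000000U

static uint32_t ih_rb_cntl;	/* stands in for the IH_RB_CNTL register */

static void ack_overflow(void)
{
	uint32_t tmp = ih_rb_cntl;

	tmp |= OVERFLOW_CLEAR_MASK;	/* 1: acknowledge current overflow */
	ih_rb_cntl = tmp;

	tmp &= ~OVERFLOW_CLEAR_MASK;	/* 2: rearm so the next one is seen */
	ih_rb_cntl = tmp;
}

int main(void)
{
	ack_overflow();
	printf("IH_RB_CNTL = 0x%08x\n", ih_rb_cntl);	/* bit left clear */
	return 0;
}
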
diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
index 8b8086d5c..93f6772d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
+++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
@@ -36,7 +36,7 @@ static bool sienna_cichlid_is_mode2_default(struct amdgpu_reset_control *reset_c
 #if 0
 	struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
 
-	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7) &&
+	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 7) &&
 	    adev->pm.fw_version >= 0x3a5500 && !amdgpu_sriov_vf(adev))
 		return true;
 #endif
@@ -48,18 +48,17 @@ sienna_cichlid_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
 			    struct amdgpu_reset_context *reset_context)
 {
 	struct amdgpu_reset_handler *handler;
+	int i;
 
 	if (reset_context->method != AMD_RESET_METHOD_NONE) {
-		list_for_each_entry(handler, &reset_ctl->reset_handlers,
-				    handler_list) {
+		for_each_handler(i, handler, reset_ctl) {
 			if (handler->reset_method == reset_context->method)
 				return handler;
 		}
 	}
 
 	if (sienna_cichlid_is_mode2_default(reset_ctl)) {
-		list_for_each_entry (handler, &reset_ctl->reset_handlers,
-				     handler_list) {
+		for_each_handler(i, handler, reset_ctl) {
 			if (handler->reset_method == AMD_RESET_METHOD_MODE2)
 				return handler;
 		}
@@ -120,9 +119,9 @@ static void sienna_cichlid_async_reset(struct work_struct *work)
 	struct amdgpu_reset_control *reset_ctl =
 		container_of(work, struct amdgpu_reset_control, reset_work);
 	struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+	int i;
 
-	list_for_each_entry(handler, &reset_ctl->reset_handlers,
-			    handler_list) {
+	for_each_handler(i, handler, reset_ctl) {
 		if (handler->reset_method == reset_ctl->active_reset) {
 			dev_dbg(adev->dev, "Resetting device\n");
 			handler->do_reset(adev);
@@ -281,6 +280,11 @@ static struct amdgpu_reset_handler sienna_cichlid_mode2_handler = {
 	.do_reset		= sienna_cichlid_mode2_reset,
 };
 
+static struct amdgpu_reset_handler
+	*sienna_cichlid_rst_handlers[AMDGPU_RESET_MAX_HANDLERS] = {
+		&sienna_cichlid_mode2_handler,
+	};
+
 int sienna_cichlid_reset_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_reset_control *reset_ctl;
@@ -294,11 +298,9 @@ int sienna_cichlid_reset_init(struct amdgpu_device *adev)
 	reset_ctl->active_reset = AMD_RESET_METHOD_NONE;
 	reset_ctl->get_reset_handler = sienna_cichlid_get_reset_handler;
 
-	INIT_LIST_HEAD(&reset_ctl->reset_handlers);
 	INIT_WORK(&reset_ctl->reset_work, reset_ctl->async_reset);
 	/* Only mode2 is handled through reset control now */
-	amdgpu_reset_add_handler(reset_ctl, &sienna_cichlid_mode2_handler);
-
+	reset_ctl->reset_handlers = &sienna_cichlid_rst_handlers;
 	adev->reset_cntl = reset_ctl;
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
index ae29620b1..04c797d54 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
@@ -44,10 +44,10 @@ smu_v13_0_10_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
 {
 	struct amdgpu_reset_handler *handler;
 	struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+	int i;
 
 	if (reset_context->method != AMD_RESET_METHOD_NONE) {
-		list_for_each_entry(handler, &reset_ctl->reset_handlers,
-				    handler_list) {
+		for_each_handler(i, handler, reset_ctl) {
 			if (handler->reset_method == reset_context->method)
 				return handler;
 		}
@@ -55,8 +55,7 @@ smu_v13_0_10_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
 
 	if (smu_v13_0_10_is_mode2_default(reset_ctl) &&
 	    amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_MODE2) {
-		list_for_each_entry (handler, &reset_ctl->reset_handlers,
-				     handler_list) {
+		for_each_handler(i, handler, reset_ctl) {
 			if (handler->reset_method == AMD_RESET_METHOD_MODE2)
 				return handler;
 		}
@@ -119,9 +118,9 @@ static void smu_v13_0_10_async_reset(struct work_struct *work)
 	struct amdgpu_reset_control *reset_ctl =
 		container_of(work, struct amdgpu_reset_control, reset_work);
 	struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+	int i;
 
-	list_for_each_entry(handler, &reset_ctl->reset_handlers,
-			    handler_list) {
+	for_each_handler(i, handler, reset_ctl) {
 		if (handler->reset_method == reset_ctl->active_reset) {
 			dev_dbg(adev->dev, "Resetting device\n");
 			handler->do_reset(adev);
@@ -272,6 +271,11 @@ static struct amdgpu_reset_handler smu_v13_0_10_mode2_handler = {
 	.do_reset		= smu_v13_0_10_mode2_reset,
 };
 
+static struct amdgpu_reset_handler
+	*smu_v13_0_10_rst_handlers[AMDGPU_RESET_MAX_HANDLERS] = {
+		&smu_v13_0_10_mode2_handler,
+	};
+
 int smu_v13_0_10_reset_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_reset_control *reset_ctl;
@@ -285,10 +289,9 @@ int smu_v13_0_10_reset_init(struct amdgpu_device *adev)
 	reset_ctl->active_reset = AMD_RESET_METHOD_NONE;
 	reset_ctl->get_reset_handler = smu_v13_0_10_get_reset_handler;
 
-	INIT_LIST_HEAD(&reset_ctl->reset_handlers);
 	INIT_WORK(&reset_ctl->reset_work, reset_ctl->async_reset);
 	/* Only mode2 is handled through reset control now */
-	amdgpu_reset_add_handler(reset_ctl, &smu_v13_0_10_mode2_handler);
+	reset_ctl->reset_handlers = &smu_v13_0_10_rst_handlers;
 
 	adev->reset_cntl = reset_ctl;
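
Both ASICs above stop maintaining reset handlers as a linked list (INIT_LIST_HEAD + amdgpu_reset_add_handler) and instead point reset_ctl->reset_handlers at a fixed array of handler pointers, with for_each_handler() presumably iterating the occupied slots -- an assumption, since the macro body is elsewhere in the patch series. The array variant is simpler and needs no runtime registration; roughly:

#include <stdio.h>

#define DEMO_MAX_HANDLERS 4

struct demo_handler {
	int method;
	void (*do_reset)(void);
};

static void mode2_reset(void) { puts("mode2 reset"); }

static struct demo_handler mode2 = { .method = 2, .do_reset = mode2_reset };

/* fixed table; unused slots stay NULL */
static struct demo_handler *handlers[DEMO_MAX_HANDLERS] = { &mode2 };

int main(void)
{
	int i;

	/* roughly what a for_each_handler()-style macro would expand to */
	for (i = 0; i < DEMO_MAX_HANDLERS && handlers[i]; i++) {
		if (handlers[i]->method == 2)
			handlers[i]->do_reset();
	}
	return 0;
}
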
true : false; } +static enum amdgpu_pkg_type smuio_v13_0_get_pkg_type(struct amdgpu_device *adev) +{ + enum amdgpu_pkg_type pkg_type; + u32 data; + + data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG); + data = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, TOPOLOGY_ID); + + switch (data) { + case 0x4: + case 0xC: + pkg_type = AMDGPU_PKG_TYPE_CEM; + break; + default: + pkg_type = AMDGPU_PKG_TYPE_OAM; + break; + } + + return pkg_type; +} + const struct amdgpu_smuio_funcs smuio_v13_0_funcs = { .get_rom_index_offset = smuio_v13_0_get_rom_index_offset, .get_rom_data_offset = smuio_v13_0_get_rom_data_offset, @@ -136,4 +157,5 @@ const struct amdgpu_smuio_funcs smuio_v13_0_funcs = { .is_host_gpu_xgmi_supported = smuio_v13_0_is_host_gpu_xgmi_supported, .update_rom_clock_gating = smuio_v13_0_update_rom_clock_gating, .get_clock_gating_state = smuio_v13_0_get_clock_gating_state, + .get_pkg_type = smuio_v13_0_get_pkg_type, }; diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c index 4368a5891..5461b5289 100644 --- a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c @@ -84,6 +84,12 @@ static enum amdgpu_pkg_type smuio_v13_0_3_get_pkg_type(struct amdgpu_device *ade * b0100 - b1111 - Reserved */ switch (data & PKG_TYPE_MASK) { + case 0x0: + pkg_type = AMDGPU_PKG_TYPE_CEM; + break; + case 0x1: + pkg_type = AMDGPU_PKG_TYPE_OAM; + break; case 0x2: pkg_type = AMDGPU_PKG_TYPE_APU; break; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 3667f9a54..9b5af3f13 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -174,8 +174,8 @@ static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_encode = { static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode, const struct amdgpu_video_codecs **codecs) { - if (adev->ip_versions[VCE_HWIP][0]) { - switch (adev->ip_versions[VCE_HWIP][0]) { + if (amdgpu_ip_version(adev, VCE_HWIP, 0)) { + switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) { case IP_VERSION(4, 0, 0): case IP_VERSION(4, 1, 0): if (encode) @@ -187,7 +187,7 @@ static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode, return -EINVAL; } } else { - switch (adev->ip_versions[UVD_HWIP][0]) { + switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): if (encode) @@ -324,12 +324,12 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev) { u32 reference_clock = adev->clock.spll.reference_freq; - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) || - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1) || - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 6)) + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 0) || + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 1) || + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6)) return 10000; - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) || - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1)) + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 0) || + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 1)) return reference_clock / 4; return reference_clock; @@ -523,7 +523,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev) dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n", amdgpu_reset_method); - switch (adev->ip_versions[MP1_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(10, 0, 
0): case IP_VERSION(10, 0, 1): case IP_VERSION(12, 0, 0): @@ -599,7 +599,7 @@ static int soc15_asic_reset(struct amdgpu_device *adev) static bool soc15_supports_baco(struct amdgpu_device *adev) { - switch (adev->ip_versions[MP1_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(9, 0, 0): case IP_VERSION(11, 0, 2): if (adev->asic_type == CHIP_VEGA20) { @@ -646,8 +646,7 @@ static void soc15_program_aspm(struct amdgpu_device *adev) if (!amdgpu_device_should_use_aspm(adev)) return; - if (!(adev->flags & AMD_IS_APU) && - (adev->nbio.funcs->program_aspm)) + if (adev->nbio.funcs->program_aspm) adev->nbio.funcs->program_aspm(adev); } @@ -922,6 +921,8 @@ static int soc15_common_early_init(void *handle) adev->pcie_wreg_ext = &amdgpu_device_indirect_wreg_ext; adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64; adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64; + adev->pcie_rreg64_ext = &amdgpu_device_indirect_rreg64_ext; + adev->pcie_wreg64_ext = &amdgpu_device_indirect_wreg64_ext; adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg; adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg; adev->didt_rreg = &soc15_didt_rreg; @@ -936,7 +937,7 @@ static int soc15_common_early_init(void *handle) /* TODO: split the GC and PG flags based on the relevant IP version for which * they are relevant. */ - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 0, 1): adev->asic_funcs = &soc15_asic_funcs; adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | @@ -1296,10 +1297,32 @@ static int soc15_common_suspend(void *handle) return soc15_common_hw_fini(adev); } +static bool soc15_need_reset_on_resume(struct amdgpu_device *adev) +{ + u32 sol_reg; + + sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); + + /* Will reset for the following suspend abort cases. + * 1) Only reset limit on APU side, dGPU hasn't checked yet. + * 2) S3 suspend abort and TOS already launched. 
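+	 * Here "TOS already launched" is detected through sol_reg: the
+	 * MP0_SMN_C2PMSG_81 scratch register read above is the PSP
+	 * sign-of-life value, which reads back non-zero once the trusted
+	 * OS has been loaded.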
+ */ + if (adev->flags & AMD_IS_APU && adev->in_s3 && + !adev->suspend_complete && + sol_reg) + return true; + + return false; +} + static int soc15_common_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (soc15_need_reset_on_resume(adev)) { + dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n"); + soc15_asic_reset(adev); + } return soc15_common_hw_init(adev); } @@ -1370,7 +1393,7 @@ static int soc15_common_set_clockgating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->ip_versions[NBIO_HWIP][0]) { + switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { case IP_VERSION(6, 1, 0): case IP_VERSION(6, 2, 0): case IP_VERSION(7, 4, 0): @@ -1428,8 +1451,8 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags) if (adev->hdp.funcs && adev->hdp.funcs->get_clock_gating_state) adev->hdp.funcs->get_clock_gating_state(adev, flags); - if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2)) { - + if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) && + (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6))) { /* AMD_CG_SUPPORT_DRM_MGCG */ data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0)); if (!(data & 0x01000000)) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index da683afa0..242b24f73 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -69,7 +69,7 @@ #define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP, 0) -#define RREG32_SOC15_IP_NO_KIQ(ip, reg) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP, 0) +#define RREG32_SOC15_IP_NO_KIQ(ip, reg, inst) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP, inst) #define RREG32_SOC15_NO_KIQ(ip, inst, reg) \ __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ @@ -86,8 +86,8 @@ #define WREG32_SOC15_IP(ip, reg, value) \ __WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP, 0) -#define WREG32_SOC15_IP_NO_KIQ(ip, reg, value) \ - __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP, 0) +#define WREG32_SOC15_IP_NO_KIQ(ip, reg, value, inst) \ + __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP, inst) #define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \ __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ @@ -140,7 +140,7 @@ /* for GC only */ #define RREG32_RLC(reg) \ - __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_RLC, GC_HWIP) + __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_RLC, GC_HWIP, 0) #define WREG32_RLC_NO_KIQ(reg, value, hwip) \ __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip, 0) @@ -204,4 +204,10 @@ + adev->asic_funcs->encode_ext_smn_addressing(ext), \ value) \ +#define RREG64_MCA(ext, mca_base, idx) \ + RREG64_PCIE_EXT(adev->asic_funcs->encode_ext_smn_addressing(ext) + mca_base + (idx * 8)) + +#define WREG64_MCA(ext, mca_base, idx, val) \ + WREG64_PCIE_EXT(adev->asic_funcs->encode_ext_smn_addressing(ext) + mca_base + (idx * 8), val) + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 8b2ff2b28..4d7188912 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -50,13 +50,13 @@ static const struct amd_ip_funcs soc21_common_ip_funcs; /* SOC21 */ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, - 
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)}, }; static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 = { @@ -153,10 +153,11 @@ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode, if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config)) return -EINVAL; - switch (adev->ip_versions[UVD_HWIP][0]) { + switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { case IP_VERSION(4, 0, 0): case IP_VERSION(4, 0, 2): case IP_VERSION(4, 0, 4): + case IP_VERSION(4, 0, 5): if (amdgpu_sriov_vf(adev)) { if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) || !amdgpu_sriov_is_av1_support(adev)) { @@ -373,13 +374,14 @@ soc21_asic_reset_method(struct amdgpu_device *adev) dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n", amdgpu_reset_method); - switch (adev->ip_versions[MP1_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(13, 0, 0): case IP_VERSION(13, 0, 7): case IP_VERSION(13, 0, 10): return AMD_RESET_METHOD_MODE1; case IP_VERSION(13, 0, 4): case IP_VERSION(13, 0, 11): + case IP_VERSION(14, 0, 0): return AMD_RESET_METHOD_MODE2; default: if (amdgpu_dpm_is_baco_supported(adev)) @@ -432,8 +434,7 @@ static void soc21_program_aspm(struct amdgpu_device *adev) if (!amdgpu_device_should_use_aspm(adev)) return; - if (!(adev->flags & AMD_IS_APU) && - (adev->nbio.funcs->program_aspm)) + if (adev->nbio.funcs->program_aspm) adev->nbio.funcs->program_aspm(adev); } @@ -447,7 +448,7 @@ const struct amdgpu_ip_block_version soc21_common_ip_block = { static bool soc21_need_full_reset(struct amdgpu_device *adev) { - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC); case IP_VERSION(11, 0, 2): @@ -503,6 +504,7 @@ static void soc21_init_doorbell_index(struct amdgpu_device *adev) adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3; adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5; adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7; + adev->doorbell_index.vpe_ring = AMDGPU_NAVI10_DOORBELL64_VPE; adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP; adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP; @@ -575,7 +577,7 @@ static int soc21_common_early_init(void *handle) adev->rev_id = amdgpu_device_get_rev_id(adev); adev->external_rev_id = 0xff; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS | @@ -687,7 +689,33 @@ static int soc21_common_early_init(void *handle) AMD_PG_SUPPORT_JPEG; adev->external_rev_id = adev->rev_id + 0x80; break; - + case IP_VERSION(11, 5, 0): + adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_FGCG | + 
AMD_CG_SUPPORT_REPEATER_FGCG | + AMD_CG_SUPPORT_GFX_PERF_CLK | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_HDP_DS | + AMD_CG_SUPPORT_HDP_SD | + AMD_CG_SUPPORT_ATHUB_MGCG | + AMD_CG_SUPPORT_ATHUB_LS | + AMD_CG_SUPPORT_IH_CG | + AMD_CG_SUPPORT_BIF_MGCG | + AMD_CG_SUPPORT_BIF_LS; + adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG | + AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_JPEG | + AMD_PG_SUPPORT_GFX_PG; + adev->external_rev_id = adev->rev_id + 0x1; + break; default: /* FIXME: not supported yet */ return -EINVAL; @@ -831,10 +859,12 @@ static int soc21_common_set_clockgating_state(void *handle, { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - switch (adev->ip_versions[NBIO_HWIP][0]) { + switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { case IP_VERSION(4, 3, 0): case IP_VERSION(4, 3, 1): case IP_VERSION(7, 7, 0): + case IP_VERSION(7, 7, 1): + case IP_VERSION(7, 11, 0): adev->nbio.funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, @@ -853,7 +883,7 @@ static int soc21_common_set_powergating_state(void *handle, { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - switch (adev->ip_versions[LSDMA_HWIP][0]) { + switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) { case IP_VERSION(6, 0, 0): case IP_VERSION(6, 0, 2): adev->lsdma.funcs->update_memory_power_gating(adev, @@ -873,8 +903,6 @@ static void soc21_common_get_clockgating_state(void *handle, u64 *flags) adev->nbio.funcs->get_clockgating_state(adev, flags); adev->hdp.funcs->get_clock_gating_state(adev, flags); - - return; } static const struct amd_ip_funcs soc21_common_ip_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h index da815a93d..d57480326 100644 --- a/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h +++ b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h @@ -1,5 +1,5 @@ /* - * Copyright 2018 Advanced Micro Devices, Inc. + * Copyright 2018-2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -20,7 +20,6 @@ * OTHER DEALINGS IN THE SOFTWARE. 
* */ - #ifndef _TA_XGMI_IF_H #define _TA_XGMI_IF_H @@ -28,20 +27,31 @@ #define RSP_ID_MASK (1U << 31) #define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK) +#define EXTEND_PEER_LINK_INFO_CMD_FLAG 1 + enum ta_command_xgmi { + /* Initialize the Context and Session Topology */ TA_COMMAND_XGMI__INITIALIZE = 0x00, + /* Gets the current GPU's node ID */ TA_COMMAND_XGMI__GET_NODE_ID = 0x01, + /* Gets the current GPU's hive ID */ TA_COMMAND_XGMI__GET_HIVE_ID = 0x02, - TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO = 0x03, + /* Gets the Peer's topology Information */ + TA_COMMAND_XGMI__GET_TOPOLOGY_INFO = 0x03, + /* Sets the Peer's topology Information */ TA_COMMAND_XGMI__SET_TOPOLOGY_INFO = 0x04, - TA_COMMAND_XGMI__GET_PEER_LINKS = 0x0B + /* Gets the total links between adjacent peer dies in hive */ + TA_COMMAND_XGMI__GET_PEER_LINKS = 0x0B, + /* Gets the total links and connected port numbers between adjacent peer dies in hive */ + TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS = 0x0C }; /* XGMI related enumerations */ /**********************************************************/; -enum ta_xgmi_connected_nodes { - TA_XGMI__MAX_CONNECTED_NODES = 64 -}; +enum { TA_XGMI__MAX_CONNECTED_NODES = 64 }; +enum { TA_XGMI__MAX_INTERNAL_STATE = 32 }; +enum { TA_XGMI__MAX_INTERNAL_STATE_BUFFER = 128 }; +enum { TA_XGMI__MAX_PORT_NUM = 8 }; enum ta_xgmi_status { TA_XGMI_STATUS__SUCCESS = 0x00, @@ -81,6 +91,18 @@ struct ta_xgmi_peer_link_info { uint8_t num_links; }; +struct xgmi_connected_port_num { + uint8_t dst_xgmi_port_num; + uint8_t src_xgmi_port_num; +}; + +/* support both the port num and num_links */ +struct ta_xgmi_extend_peer_link_info { + uint64_t node_id; + uint8_t num_links; + struct xgmi_connected_port_num port_num[TA_XGMI__MAX_PORT_NUM]; +}; + struct ta_xgmi_cmd_initialize_output { uint32_t status; }; @@ -103,16 +125,21 @@ struct ta_xgmi_cmd_get_topology_info_output { struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES]; }; -struct ta_xgmi_cmd_get_peer_link_info_output { +struct ta_xgmi_cmd_set_topology_info_input { uint32_t num_nodes; - struct ta_xgmi_peer_link_info nodes[TA_XGMI__MAX_CONNECTED_NODES]; + struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES]; }; -struct ta_xgmi_cmd_set_topology_info_input { +/* support XGMI TA w/ and w/o port_num both so two similar structs defined */ +struct ta_xgmi_cmd_get_peer_link_info { uint32_t num_nodes; - struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES]; + struct ta_xgmi_peer_link_info nodes[TA_XGMI__MAX_CONNECTED_NODES]; }; +struct ta_xgmi_cmd_get_extend_peer_link_info { + uint32_t num_nodes; + struct ta_xgmi_extend_peer_link_info nodes[TA_XGMI__MAX_CONNECTED_NODES]; +}; /**********************************************************/ /* Common input structure for XGMI callbacks */ union ta_xgmi_cmd_input { @@ -126,16 +153,23 @@ union ta_xgmi_cmd_output { struct ta_xgmi_cmd_get_node_id_output get_node_id; struct ta_xgmi_cmd_get_hive_id_output get_hive_id; struct ta_xgmi_cmd_get_topology_info_output get_topology_info; - struct ta_xgmi_cmd_get_peer_link_info_output get_link_info; + struct ta_xgmi_cmd_get_peer_link_info get_link_info; + struct ta_xgmi_cmd_get_extend_peer_link_info get_extend_link_info; }; -/**********************************************************/ struct ta_xgmi_shared_memory { uint32_t cmd_id; uint32_t resp_id; enum ta_xgmi_status xgmi_status; + + /* if the number of xgmi link record is more than 128, driver will set the + * flag 0 to get the first 128 of the link records and will set to 1, to get + * the second set + */ uint8_t 
flag_extend_link_record; - uint8_t reserved0[3]; + /* bit0: port_num info support flag for GET_EXTEND_PEER_LINKS commmand */ + uint8_t caps_flag; + uint8_t reserved[2]; union ta_xgmi_cmd_input xgmi_in_message; union ta_xgmi_cmd_output xgmi_out_message; }; diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index 917707bba..450b6e831 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -219,6 +219,12 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32(mmIH_RB_CNTL, tmp); + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32(mmIH_RB_CNTL, tmp); + out: return (wptr & ih->ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c new file mode 100644 index 000000000..e9c2ff74f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -0,0 +1,389 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "umc_v12_0.h" +#include "amdgpu_ras.h" +#include "amdgpu_umc.h" +#include "amdgpu.h" +#include "umc/umc_12_0_0_offset.h" +#include "umc/umc_12_0_0_sh_mask.h" + +const uint32_t + umc_v12_0_channel_idx_tbl[] + [UMC_V12_0_UMC_INSTANCE_NUM] + [UMC_V12_0_CHANNEL_INSTANCE_NUM] = { + {{3, 7, 11, 15, 2, 6, 10, 14}, {1, 5, 9, 13, 0, 4, 8, 12}, + {19, 23, 27, 31, 18, 22, 26, 30}, {17, 21, 25, 29, 16, 20, 24, 28}}, + {{47, 43, 39, 35, 46, 42, 38, 34}, {45, 41, 37, 33, 44, 40, 36, 32}, + {63, 59, 55, 51, 62, 58, 54, 50}, {61, 57, 53, 49, 60, 56, 52, 48}}, + {{79, 75, 71, 67, 78, 74, 70, 66}, {77, 73, 69, 65, 76, 72, 68, 64}, + {95, 91, 87, 83, 94, 90, 86, 82}, {93, 89, 85, 81, 92, 88, 84, 80}}, + {{99, 103, 107, 111, 98, 102, 106, 110}, {97, 101, 105, 109, 96, 100, 104, 108}, + {115, 119, 123, 127, 114, 118, 122, 126}, {113, 117, 121, 125, 112, 116, 120, 124}} + }; + +/* mapping of MCA error address to normalized address */ +static const uint32_t umc_v12_0_ma2na_mapping[] = { + 0, 5, 6, 8, 9, 14, 12, 13, + 10, 11, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, + 24, 7, 29, 30, +}; + +static inline uint64_t get_umc_v12_0_reg_offset(struct amdgpu_device *adev, + uint32_t node_inst, + uint32_t umc_inst, + uint32_t ch_inst) +{ + uint32_t index = umc_inst * adev->umc.channel_inst_num + ch_inst; + uint64_t cross_node_offset = (node_inst == 0) ? 0 : UMC_V12_0_CROSS_NODE_OFFSET; + + umc_inst = index / 4; + ch_inst = index % 4; + + return adev->umc.channel_offs * ch_inst + UMC_V12_0_INST_DIST * umc_inst + + UMC_V12_0_NODE_DIST * node_inst + cross_node_offset; +} + +static int umc_v12_0_reset_error_count_per_channel(struct amdgpu_device *adev, + uint32_t node_inst, uint32_t umc_inst, + uint32_t ch_inst, void *data) +{ + uint64_t odecc_err_cnt_addr; + uint64_t umc_reg_offset = + get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst); + + odecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccErrCnt); + + /* clear error count */ + WREG32_PCIE_EXT((odecc_err_cnt_addr + umc_reg_offset) * 4, + UMC_V12_0_CE_CNT_INIT); + + return 0; +} + +static void umc_v12_0_reset_error_count(struct amdgpu_device *adev) +{ + amdgpu_umc_loop_channels(adev, + umc_v12_0_reset_error_count_per_channel, NULL); +} + +bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status) +{ + return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)); +} + +bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status) +{ + return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 || + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 && + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 0) || + /* Identify data parity error in replay mode */ + ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0x5 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0xb) && + !(umc_v12_0_is_uncorrectable_error(mc_umc_status))))); +} + +static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev, + uint64_t umc_reg_offset, + unsigned long *error_count) +{ + uint64_t mc_umc_status; + uint64_t mc_umc_status_addr; + + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, 
regMCA_UMC_UMC0_MCUMC_STATUST0); + + /* Rely on MCUMC_STATUS for correctable error counter + * MCUMC_STATUS is a 64 bit register + */ + mc_umc_status = + RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4); + + if (umc_v12_0_is_correctable_error(mc_umc_status)) + *error_count += 1; +} + +static void umc_v12_0_query_uncorrectable_error_count(struct amdgpu_device *adev, + uint64_t umc_reg_offset, + unsigned long *error_count) +{ + uint64_t mc_umc_status; + uint64_t mc_umc_status_addr; + + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0); + + /* Check the MCUMC_STATUS. */ + mc_umc_status = + RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4); + + if (umc_v12_0_is_uncorrectable_error(mc_umc_status)) + *error_count += 1; +} + +static int umc_v12_0_query_error_count(struct amdgpu_device *adev, + uint32_t node_inst, uint32_t umc_inst, + uint32_t ch_inst, void *data) +{ + struct ras_err_data *err_data = (struct ras_err_data *)data; + unsigned long ue_count = 0, ce_count = 0; + + /* NOTE: node_inst is converted by adev->umc.active_mask and the range is [0-3], + * which can be used as die ID directly */ + struct amdgpu_smuio_mcm_config_info mcm_info = { + .socket_id = adev->smuio.funcs->get_socket_id(adev), + .die_id = node_inst, + }; + + uint64_t umc_reg_offset = + get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst); + + umc_v12_0_query_correctable_error_count(adev, umc_reg_offset, &ce_count); + umc_v12_0_query_uncorrectable_error_count(adev, umc_reg_offset, &ue_count); + + amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count); + amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count); + + return 0; +} + +static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + amdgpu_umc_loop_channels(adev, + umc_v12_0_query_error_count, ras_error_status); + + umc_v12_0_reset_error_count(adev); +} + +static bool umc_v12_0_bit_wise_xor(uint32_t val) +{ + bool result = 0; + int i; + + for (i = 0; i < 32; i++) + result = result ^ ((val >> i) & 0x1); + + return result; +} + +static void umc_v12_0_convert_error_address(struct amdgpu_device *adev, + struct ras_err_data *err_data, uint64_t err_addr, + uint32_t ch_inst, uint32_t umc_inst, + uint32_t node_inst) +{ + uint32_t channel_index, i; + uint64_t soc_pa, na, retired_page, column; + uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row, row_xor; + uint32_t bank0, bank1, bank2, bank3, bank; + + bank_hash0 = (err_addr >> UMC_V12_0_MCA_B0_BIT) & 0x1ULL; + bank_hash1 = (err_addr >> UMC_V12_0_MCA_B1_BIT) & 0x1ULL; + bank_hash2 = (err_addr >> UMC_V12_0_MCA_B2_BIT) & 0x1ULL; + bank_hash3 = (err_addr >> UMC_V12_0_MCA_B3_BIT) & 0x1ULL; + col = (err_addr >> 1) & 0x1fULL; + row = (err_addr >> 10) & 0x3fffULL; + + /* apply bank hash algorithm */ + bank0 = + bank_hash0 ^ (UMC_V12_0_XOR_EN0 & + (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR0) ^ + (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR0)))); + bank1 = + bank_hash1 ^ (UMC_V12_0_XOR_EN1 & + (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR1) ^ + (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR1)))); + bank2 = + bank_hash2 ^ (UMC_V12_0_XOR_EN2 & + (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR2) ^ + (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR2)))); + bank3 = + bank_hash3 ^ (UMC_V12_0_XOR_EN3 & + (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR3) ^ + (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR3)))); + + bank = bank0 | (bank1 << 1) | (bank2 << 2) | (bank3 << 3); + err_addr &= ~0x3c0ULL; 
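+	/* bits [9:6] (mask 0x3c0) held the hashed bank bits and were just
+	 * cleared; the bank value recovered from the XOR reduction above is
+	 * written back into the same bit positions below.
+	 */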
+ err_addr |= (bank << UMC_V12_0_MCA_B0_BIT); + + na = 0x0; + /* convert mca error address to normalized address */ + for (i = 1; i < ARRAY_SIZE(umc_v12_0_ma2na_mapping); i++) + na |= ((err_addr >> i) & 0x1ULL) << umc_v12_0_ma2na_mapping[i]; + + channel_index = + adev->umc.channel_idx_tbl[node_inst * adev->umc.umc_inst_num * + adev->umc.channel_inst_num + + umc_inst * adev->umc.channel_inst_num + + ch_inst]; + /* translate umc channel address to soc pa, 3 parts are included */ + soc_pa = ADDR_OF_32KB_BLOCK(na) | + ADDR_OF_256B_BLOCK(channel_index) | + OFFSET_IN_256B_BLOCK(na); + + /* the umc channel bits are not original values, they are hashed */ + UMC_V12_0_SET_CHANNEL_HASH(channel_index, soc_pa); + + /* clear [C3 C2] in soc physical address */ + soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT); + /* clear [C4] in soc physical address */ + soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT); + + row_xor = row ^ (0x1ULL << 13); + /* loop for all possibilities of [C4 C3 C2] */ + for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) { + retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT); + retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT); + /* include column bit 0 and 1 */ + col &= 0x3; + col |= (column << 2); + dev_info(adev->dev, + "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n", + retired_page, row, col, bank, channel_index); + amdgpu_umc_fill_error_record(err_data, err_addr, + retired_page, channel_index, umc_inst); + + /* shift R13 bit */ + retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT); + dev_info(adev->dev, + "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n", + retired_page, row_xor, col, bank, channel_index); + amdgpu_umc_fill_error_record(err_data, err_addr, + retired_page, channel_index, umc_inst); + } +} + +static int umc_v12_0_query_error_address(struct amdgpu_device *adev, + uint32_t node_inst, uint32_t umc_inst, + uint32_t ch_inst, void *data) +{ + uint64_t mc_umc_status_addr; + uint64_t mc_umc_status, err_addr; + uint64_t mc_umc_addrt0; + struct ras_err_data *err_data = (struct ras_err_data *)data; + uint64_t umc_reg_offset = + get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst); + + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0); + + mc_umc_status = RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4); + + if (mc_umc_status == 0) + return 0; + + if (!err_data->err_addr) { + /* clear umc status */ + WREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL); + + return 0; + } + + /* calculate error address if ue error is detected */ + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 && + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1) { + + mc_umc_addrt0 = + SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0); + + err_addr = RREG64_PCIE_EXT((mc_umc_addrt0 + umc_reg_offset) * 4); + + err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); + + umc_v12_0_convert_error_address(adev, err_data, err_addr, + ch_inst, umc_inst, node_inst); + } + + /* clear umc status */ + WREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL); + + return 0; +} + +static void umc_v12_0_query_ras_error_address(struct amdgpu_device *adev, + void *ras_error_status) +{ + amdgpu_umc_loop_channels(adev, + umc_v12_0_query_error_address, ras_error_status); +} + +static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device 
*adev, + uint32_t node_inst, uint32_t umc_inst, + uint32_t ch_inst, void *data) +{ + uint32_t odecc_cnt_sel; + uint64_t odecc_cnt_sel_addr, odecc_err_cnt_addr; + uint64_t umc_reg_offset = + get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst); + + odecc_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccCntSel); + odecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccErrCnt); + + odecc_cnt_sel = RREG32_PCIE_EXT((odecc_cnt_sel_addr + umc_reg_offset) * 4); + + /* set ce error interrupt type to APIC based interrupt */ + odecc_cnt_sel = REG_SET_FIELD(odecc_cnt_sel, UMCCH0_OdEccCntSel, + OdEccErrInt, 0x1); + WREG32_PCIE_EXT((odecc_cnt_sel_addr + umc_reg_offset) * 4, odecc_cnt_sel); + + /* set error count to initial value */ + WREG32_PCIE_EXT((odecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V12_0_CE_CNT_INIT); + + return 0; +} + +static void umc_v12_0_err_cnt_init(struct amdgpu_device *adev) +{ + amdgpu_umc_loop_channels(adev, + umc_v12_0_err_cnt_init_per_channel, NULL); +} + +static bool umc_v12_0_query_ras_poison_mode(struct amdgpu_device *adev) +{ + /* + * Force return true, because regUMCCH0_EccCtrl + * is not accessible from host side + */ + return true; +} + +const struct amdgpu_ras_block_hw_ops umc_v12_0_ras_hw_ops = { + .query_ras_error_count = umc_v12_0_query_ras_error_count, + .query_ras_error_address = umc_v12_0_query_ras_error_address, +}; + +struct amdgpu_umc_ras umc_v12_0_ras = { + .ras_block = { + .hw_ops = &umc_v12_0_ras_hw_ops, + }, + .err_cnt_init = umc_v12_0_err_cnt_init, + .query_ras_poison_mode = umc_v12_0_query_ras_poison_mode, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h new file mode 100644 index 000000000..b34b1e358 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h @@ -0,0 +1,130 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __UMC_V12_0_H__ +#define __UMC_V12_0_H__ + +#include "soc15_common.h" +#include "amdgpu.h" + +#define UMC_V12_0_NODE_DIST 0x40000000 +#define UMC_V12_0_INST_DIST 0x40000 + +/* UMC register per channel offset */ +#define UMC_V12_0_PER_CHANNEL_OFFSET 0x400 + +/* UMC cross node offset */ +#define UMC_V12_0_CROSS_NODE_OFFSET 0x100000000 + +/* OdEccErrCnt max value */ +#define UMC_V12_0_CE_CNT_MAX 0xffff +/* umc ce interrupt threshold */ +#define UMC_V12_0_CE_INT_THRESHOLD 0xffff +/* umc ce count initial value */ +#define UMC_V12_0_CE_CNT_INIT (UMC_V12_0_CE_CNT_MAX - UMC_V12_0_CE_INT_THRESHOLD) + +/* number of umc channel instance with memory map register access */ +#define UMC_V12_0_CHANNEL_INSTANCE_NUM 8 +/* number of umc instance with memory map register access */ +#define UMC_V12_0_UMC_INSTANCE_NUM 4 + +/* Total channel instances for all available umc nodes */ +#define UMC_V12_0_TOTAL_CHANNEL_NUM(adev) \ + (UMC_V12_0_CHANNEL_INSTANCE_NUM * (adev)->gmc.num_umc) + +/* one piece of normalized address is mapped to 8 pieces of physical address */ +#define UMC_V12_0_NA_MAP_PA_NUM 8 +/* R13 bit shift should be considered, double the number */ +#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2) +/* bank bits in MCA error address */ +#define UMC_V12_0_MCA_B0_BIT 6 +#define UMC_V12_0_MCA_B1_BIT 7 +#define UMC_V12_0_MCA_B2_BIT 8 +#define UMC_V12_0_MCA_B3_BIT 9 +/* column bits in SOC physical address */ +#define UMC_V12_0_PA_C2_BIT 15 +#define UMC_V12_0_PA_C4_BIT 21 +/* row bits in SOC physical address */ +#define UMC_V12_0_PA_R13_BIT 35 +/* channel index bits in SOC physical address */ +#define UMC_V12_0_PA_CH4_BIT 12 +#define UMC_V12_0_PA_CH5_BIT 13 +#define UMC_V12_0_PA_CH6_BIT 14 + +/* bank hash settings */ +#define UMC_V12_0_XOR_EN0 1 +#define UMC_V12_0_XOR_EN1 1 +#define UMC_V12_0_XOR_EN2 1 +#define UMC_V12_0_XOR_EN3 1 +#define UMC_V12_0_COL_XOR0 0x0 +#define UMC_V12_0_COL_XOR1 0x0 +#define UMC_V12_0_COL_XOR2 0x800 +#define UMC_V12_0_COL_XOR3 0x1000 +#define UMC_V12_0_ROW_XOR0 0x11111 +#define UMC_V12_0_ROW_XOR1 0x22222 +#define UMC_V12_0_ROW_XOR2 0x4444 +#define UMC_V12_0_ROW_XOR3 0x8888 + +/* channel hash settings */ +#define UMC_V12_0_HASH_4K 0 +#define UMC_V12_0_HASH_64K 1 +#define UMC_V12_0_HASH_2M 1 +#define UMC_V12_0_HASH_1G 1 +#define UMC_V12_0_HASH_1T 1 + +/* XOR some bits of PA into CH4~CH6 bits (bits 12~14 of PA), + * hash bit is only effective when related setting is enabled + */ +#define UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) ((((channel_idx) >> 5) & 0x1) ^ \ + (((pa) >> 20) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \ + (((pa) >> 27) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \ + (((pa) >> 34) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \ + (((pa) >> 41) & 0x1ULL & UMC_V12_0_HASH_1T)) +#define UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) ((((channel_idx) >> 6) & 0x1) ^ \ + (((pa) >> 21) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \ + (((pa) >> 28) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \ + (((pa) >> 35) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \ + (((pa) >> 42) & 0x1ULL & UMC_V12_0_HASH_1T)) +#define UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) ((((channel_idx) >> 4) & 0x1) ^ \ + (((pa) >> 19) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \ + (((pa) >> 26) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \ + (((pa) >> 33) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \ + (((pa) >> 40) & 0x1ULL & UMC_V12_0_HASH_1T) ^ \ + (((pa) >> 47) & 0x1ULL & UMC_V12_0_HASH_4K)) +#define UMC_V12_0_SET_CHANNEL_HASH(channel_idx, pa) do { \ + (pa) &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT); \ + (pa) |= (UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) << 
UMC_V12_0_PA_CH4_BIT); \ + (pa) |= (UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) << UMC_V12_0_PA_CH5_BIT); \ + (pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \ + } while (0) + +bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status); +bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status); + +extern const uint32_t + umc_v12_0_channel_idx_tbl[] + [UMC_V12_0_UMC_INSTANCE_NUM] + [UMC_V12_0_CHANNEL_INSTANCE_NUM]; + +extern struct amdgpu_umc_ras umc_v12_0_ras; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c index 46bfdee79..c4c772577 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c @@ -336,7 +336,7 @@ static void umc_v8_10_ecc_info_query_correctable_error_count(struct amdgpu_devic uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst, unsigned long *error_count) { - uint64_t mc_umc_status; + uint16_t ecc_ce_cnt; uint32_t eccinfo_table_idx; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); @@ -345,12 +345,10 @@ static void umc_v8_10_ecc_info_query_correctable_error_count(struct amdgpu_devic umc_inst * adev->umc.channel_inst_num + ch_inst; - /* check the MCUMC_STATUS */ - mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status; - if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && - REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) { - *error_count += 1; - } + /* Retrieve CE count */ + ecc_ce_cnt = ras->umc_ecc.ecc[eccinfo_table_idx].ce_count_lo_chip; + if (ecc_ce_cnt) + *error_count += ecc_ce_cnt; } static void umc_v8_10_ecc_info_query_uncorrectable_error_count(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c new file mode 100644 index 000000000..8e7b763cf --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c @@ -0,0 +1,424 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include "amdgpu.h"
+#include "soc15_common.h"
+#include "soc21.h"
+#include "vcn/vcn_4_0_0_offset.h"
+#include "vcn/vcn_4_0_0_sh_mask.h"
+
+#include "amdgpu_umsch_mm.h"
+#include "umsch_mm_4_0_api_def.h"
+#include "umsch_mm_v4_0.h"
+
+#define regUVD_IPX_DLDO_CONFIG 0x0064
+#define regUVD_IPX_DLDO_CONFIG_BASE_IDX 1
+#define regUVD_IPX_DLDO_STATUS 0x0065
+#define regUVD_IPX_DLDO_STATUS_BASE_IDX 1
+
+#define UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT 0x00000002
+#define UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG_MASK 0x0000000cUL
+#define UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS__SHIFT 0x00000001
+#define UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS_MASK 0x00000002UL
+
+static int umsch_mm_v4_0_load_microcode(struct amdgpu_umsch_mm *umsch)
+{
+	struct amdgpu_device *adev = umsch->ring.adev;
+	uint64_t data;
+	int r;
+
+	r = amdgpu_umsch_mm_allocate_ucode_buffer(umsch);
+	if (r)
+		return r;
+
+	r = amdgpu_umsch_mm_allocate_ucode_data_buffer(umsch);
+	if (r)
+		goto err_free_ucode_bo;
+
+	umsch->cmd_buf_curr_ptr = umsch->cmd_buf_ptr;
+
+	if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5)) {
+		WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG,
+			1 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT);
+		SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS,
+			0 << UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS__SHIFT,
+			UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS_MASK);
+	}
+
+	data = RREG32_SOC15(VCN, 0, regUMSCH_MES_RESET_CTRL);
+	data = REG_SET_FIELD(data, UMSCH_MES_RESET_CTRL, MES_CORE_SOFT_RESET, 0);
+	WREG32_SOC15_UMSCH(regUMSCH_MES_RESET_CTRL, data);
+
+	data = RREG32_SOC15(VCN, 0, regVCN_MES_CNTL);
+	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_INVALIDATE_ICACHE, 1);
+	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_PIPE0_RESET, 1);
+	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_PIPE0_ACTIVE, 0);
+	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_HALT, 1);
+	WREG32_SOC15_UMSCH(regVCN_MES_CNTL, data);
+
+	data = RREG32_SOC15(VCN, 0, regVCN_MES_IC_BASE_CNTL);
+	data = REG_SET_FIELD(data, VCN_MES_IC_BASE_CNTL, VMID, 0);
+	data = REG_SET_FIELD(data, VCN_MES_IC_BASE_CNTL, EXE_DISABLE, 0);
+	data = REG_SET_FIELD(data, VCN_MES_IC_BASE_CNTL, CACHE_POLICY, 0);
+	WREG32_SOC15_UMSCH(regVCN_MES_IC_BASE_CNTL, data);
+
+	WREG32_SOC15_UMSCH(regVCN_MES_INTR_ROUTINE_START,
+		lower_32_bits(adev->umsch_mm.irq_start_addr >> 2));
+	WREG32_SOC15_UMSCH(regVCN_MES_INTR_ROUTINE_START_HI,
+		upper_32_bits(adev->umsch_mm.irq_start_addr >> 2));
+
+	WREG32_SOC15_UMSCH(regVCN_MES_PRGRM_CNTR_START,
+		lower_32_bits(adev->umsch_mm.uc_start_addr >> 2));
+	WREG32_SOC15_UMSCH(regVCN_MES_PRGRM_CNTR_START_HI,
+		upper_32_bits(adev->umsch_mm.uc_start_addr >> 2));
+
+	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_INSTR_BASE_LO, 0);
+	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_INSTR_BASE_HI, 0);
+
+	data = adev->umsch_mm.uc_start_addr + adev->umsch_mm.ucode_size - 1;
+	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_INSTR_MASK_LO, lower_32_bits(data));
+	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_INSTR_MASK_HI, upper_32_bits(data));
+
+	data = adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ?
+ 0 : adev->umsch_mm.ucode_fw_gpu_addr; + WREG32_SOC15_UMSCH(regVCN_MES_IC_BASE_LO, lower_32_bits(data)); + WREG32_SOC15_UMSCH(regVCN_MES_IC_BASE_HI, upper_32_bits(data)); + + WREG32_SOC15_UMSCH(regVCN_MES_MIBOUND_LO, 0x1FFFFF); + + WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_BASE0_LO, + lower_32_bits(adev->umsch_mm.data_start_addr)); + WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_BASE0_HI, + upper_32_bits(adev->umsch_mm.data_start_addr)); + + WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_MASK0_LO, + lower_32_bits(adev->umsch_mm.data_size - 1)); + WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_MASK0_HI, + upper_32_bits(adev->umsch_mm.data_size - 1)); + + data = adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? + 0 : adev->umsch_mm.data_fw_gpu_addr; + WREG32_SOC15_UMSCH(regVCN_MES_DC_BASE_LO, lower_32_bits(data)); + WREG32_SOC15_UMSCH(regVCN_MES_DC_BASE_HI, upper_32_bits(data)); + + WREG32_SOC15_UMSCH(regVCN_MES_MDBOUND_LO, 0x3FFFF); + + data = RREG32_SOC15(VCN, 0, regUVD_UMSCH_FORCE); + data = REG_SET_FIELD(data, UVD_UMSCH_FORCE, IC_FORCE_GPUVM, 1); + data = REG_SET_FIELD(data, UVD_UMSCH_FORCE, DC_FORCE_GPUVM, 1); + WREG32_SOC15_UMSCH(regUVD_UMSCH_FORCE, data); + + data = RREG32_SOC15(VCN, 0, regVCN_MES_IC_OP_CNTL); + data = REG_SET_FIELD(data, VCN_MES_IC_OP_CNTL, PRIME_ICACHE, 0); + data = REG_SET_FIELD(data, VCN_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1); + WREG32_SOC15_UMSCH(regVCN_MES_IC_OP_CNTL, data); + + data = RREG32_SOC15(VCN, 0, regVCN_MES_IC_OP_CNTL); + data = REG_SET_FIELD(data, VCN_MES_IC_OP_CNTL, PRIME_ICACHE, 1); + WREG32_SOC15_UMSCH(regVCN_MES_IC_OP_CNTL, data); + + WREG32_SOC15_UMSCH(regVCN_MES_GP0_LO, 0); + WREG32_SOC15_UMSCH(regVCN_MES_GP0_HI, 0); + + WREG32_SOC15_UMSCH(regVCN_MES_GP1_LO, 0); + WREG32_SOC15_UMSCH(regVCN_MES_GP1_HI, 0); + + data = RREG32_SOC15(VCN, 0, regVCN_MES_CNTL); + data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_INVALIDATE_ICACHE, 0); + data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_PIPE0_RESET, 0); + data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_HALT, 0); + data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_PIPE0_ACTIVE, 1); + WREG32_SOC15_UMSCH(regVCN_MES_CNTL, data); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) + amdgpu_umsch_mm_psp_execute_cmd_buf(umsch); + + r = SOC15_WAIT_ON_RREG(VCN, 0, regVCN_MES_MSTATUS_LO, 0xAAAAAAAA, 0xFFFFFFFF); + if (r) { + dev_err(adev->dev, "UMSCH FW Load: Failed, regVCN_MES_MSTATUS_LO: 0x%08x\n", + RREG32_SOC15(VCN, 0, regVCN_MES_MSTATUS_LO)); + goto err_free_data_bo; + } + + return 0; + +err_free_data_bo: + amdgpu_bo_free_kernel(&adev->umsch_mm.data_fw_obj, + &adev->umsch_mm.data_fw_gpu_addr, + (void **)&adev->umsch_mm.data_fw_ptr); +err_free_ucode_bo: + amdgpu_bo_free_kernel(&adev->umsch_mm.ucode_fw_obj, + &adev->umsch_mm.ucode_fw_gpu_addr, + (void **)&adev->umsch_mm.ucode_fw_ptr); + return r; +} + +static void umsch_mm_v4_0_aggregated_doorbell_init(struct amdgpu_umsch_mm *umsch) +{ + struct amdgpu_device *adev = umsch->ring.adev; + uint32_t data; + + data = RREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL0); + data = REG_SET_FIELD(data, VCN_AGDB_CTRL0, OFFSET, + umsch->agdb_index[CONTEXT_PRIORITY_LEVEL_REALTIME]); + data = REG_SET_FIELD(data, VCN_AGDB_CTRL0, EN, 1); + WREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL0, data); + + data = RREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL1); + data = REG_SET_FIELD(data, VCN_AGDB_CTRL1, OFFSET, + umsch->agdb_index[CONTEXT_PRIORITY_LEVEL_FOCUS]); + data = REG_SET_FIELD(data, VCN_AGDB_CTRL1, EN, 1); + WREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL1, data); + + data = RREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL2); + data = REG_SET_FIELD(data, VCN_AGDB_CTRL2, 
OFFSET, + umsch->agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL]); + data = REG_SET_FIELD(data, VCN_AGDB_CTRL2, EN, 1); + WREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL2, data); + + data = RREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL3); + data = REG_SET_FIELD(data, VCN_AGDB_CTRL3, OFFSET, + umsch->agdb_index[CONTEXT_PRIORITY_LEVEL_IDLE]); + data = REG_SET_FIELD(data, VCN_AGDB_CTRL3, EN, 1); + WREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL3, data); +} + +static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch) +{ + struct amdgpu_ring *ring = &umsch->ring; + struct amdgpu_device *adev = ring->adev; + uint32_t data; + + data = RREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL); + data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, OFFSET, ring->doorbell_index); + data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, EN, 1); + WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL, data); + + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0); + + WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_BASE_LO, lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + + WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size); + + data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE); + data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK); + WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data); + + umsch_mm_v4_0_aggregated_doorbell_init(umsch); + + return 0; +} + +static int umsch_mm_v4_0_ring_stop(struct amdgpu_umsch_mm *umsch) +{ + struct amdgpu_ring *ring = &umsch->ring; + struct amdgpu_device *adev = ring->adev; + uint32_t data; + + data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE); + data = REG_SET_FIELD(data, VCN_RB_ENABLE, UMSCH_RB_EN, 0); + WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data); + + data = RREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL); + data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, EN, 0); + WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL, data); + + if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5)) { + WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG, + 2 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS, + 1 << UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS__SHIFT, + UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS_MASK); + } + + return 0; +} + +static int umsch_mm_v4_0_set_hw_resources(struct amdgpu_umsch_mm *umsch) +{ + union UMSCHAPI__SET_HW_RESOURCES set_hw_resources = {}; + struct amdgpu_device *adev = umsch->ring.adev; + int r; + + set_hw_resources.header.type = UMSCH_API_TYPE_SCHEDULER; + set_hw_resources.header.opcode = UMSCH_API_SET_HW_RSRC; + set_hw_resources.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + + set_hw_resources.vmid_mask_mm_vcn = umsch->vmid_mask_mm_vcn; + set_hw_resources.vmid_mask_mm_vpe = umsch->vmid_mask_mm_vpe; + set_hw_resources.engine_mask = umsch->engine_mask; + + set_hw_resources.vcn0_hqd_mask[0] = umsch->vcn0_hqd_mask; + set_hw_resources.vcn1_hqd_mask[0] = umsch->vcn1_hqd_mask; + set_hw_resources.vcn_hqd_mask[0] = umsch->vcn_hqd_mask[0]; + set_hw_resources.vcn_hqd_mask[1] = umsch->vcn_hqd_mask[1]; + set_hw_resources.vpe_hqd_mask[0] = umsch->vpe_hqd_mask; + + set_hw_resources.g_sch_ctx_gpu_mc_ptr = umsch->sch_ctx_gpu_addr; + + set_hw_resources.enable_level_process_quantum_check = 1; + + memcpy(set_hw_resources.mmhub_base, adev->reg_offset[MMHUB_HWIP][0], + sizeof(uint32_t) * 5); + set_hw_resources.mmhub_version = + IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, MMHUB_HWIP, 0)); + + memcpy(set_hw_resources.osssys_base, adev->reg_offset[OSSSYS_HWIP][0], + sizeof(uint32_t) * 5); + 
set_hw_resources.osssys_version = + IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, OSSSYS_HWIP, 0)); + + set_hw_resources.vcn_version = + IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, VCN_HWIP, 0)); + set_hw_resources.vpe_version = + IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, VPE_HWIP, 0)); + + set_hw_resources.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr; + set_hw_resources.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq; + + r = amdgpu_umsch_mm_submit_pkt(umsch, &set_hw_resources.max_dwords_in_api, + API_FRAME_SIZE_IN_DWORDS); + if (r) + return r; + + r = amdgpu_umsch_mm_query_fence(umsch); + if (r) { + dev_err(adev->dev, "UMSCH SET_HW_RESOURCES: Failed\n"); + return r; + } + + return 0; +} + +static int umsch_mm_v4_0_add_queue(struct amdgpu_umsch_mm *umsch, + struct umsch_mm_add_queue_input *input_ptr) +{ + struct amdgpu_device *adev = umsch->ring.adev; + union UMSCHAPI__ADD_QUEUE add_queue = {}; + int r; + + add_queue.header.type = UMSCH_API_TYPE_SCHEDULER; + add_queue.header.opcode = UMSCH_API_ADD_QUEUE; + add_queue.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + + add_queue.process_id = input_ptr->process_id; + add_queue.page_table_base_addr = input_ptr->page_table_base_addr; + add_queue.process_va_start = input_ptr->process_va_start; + add_queue.process_va_end = input_ptr->process_va_end; + add_queue.process_quantum = input_ptr->process_quantum; + add_queue.process_csa_addr = input_ptr->process_csa_addr; + add_queue.context_quantum = input_ptr->context_quantum; + add_queue.context_csa_addr = input_ptr->context_csa_addr; + add_queue.inprocess_context_priority = input_ptr->inprocess_context_priority; + add_queue.context_global_priority_level = + (enum UMSCH_AMD_PRIORITY_LEVEL)input_ptr->context_global_priority_level; + add_queue.doorbell_offset_0 = input_ptr->doorbell_offset_0; + add_queue.doorbell_offset_1 = input_ptr->doorbell_offset_1; + add_queue.affinity.u32All = input_ptr->affinity; + add_queue.mqd_addr = input_ptr->mqd_addr; + add_queue.engine_type = (enum UMSCH_ENGINE_TYPE)input_ptr->engine_type; + add_queue.h_context = input_ptr->h_context; + add_queue.h_queue = input_ptr->h_queue; + add_queue.vm_context_cntl = input_ptr->vm_context_cntl; + add_queue.is_context_suspended = input_ptr->is_context_suspended; + + add_queue.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr; + add_queue.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq; + + r = amdgpu_umsch_mm_submit_pkt(umsch, &add_queue.max_dwords_in_api, + API_FRAME_SIZE_IN_DWORDS); + if (r) + return r; + + r = amdgpu_umsch_mm_query_fence(umsch); + if (r) { + dev_err(adev->dev, "UMSCH ADD_QUEUE: Failed\n"); + return r; + } + + return 0; +} + +static int umsch_mm_v4_0_remove_queue(struct amdgpu_umsch_mm *umsch, + struct umsch_mm_remove_queue_input *input_ptr) +{ + union UMSCHAPI__REMOVE_QUEUE remove_queue = {}; + struct amdgpu_device *adev = umsch->ring.adev; + int r; + + remove_queue.header.type = UMSCH_API_TYPE_SCHEDULER; + remove_queue.header.opcode = UMSCH_API_REMOVE_QUEUE; + remove_queue.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + + remove_queue.doorbell_offset_0 = input_ptr->doorbell_offset_0; + remove_queue.doorbell_offset_1 = input_ptr->doorbell_offset_1; + remove_queue.context_csa_addr = input_ptr->context_csa_addr; + + remove_queue.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr; + remove_queue.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq; + + r = 
amdgpu_umsch_mm_submit_pkt(umsch, &remove_queue.max_dwords_in_api, + API_FRAME_SIZE_IN_DWORDS); + if (r) + return r; + + r = amdgpu_umsch_mm_query_fence(umsch); + if (r) { + dev_err(adev->dev, "UMSCH REMOVE_QUEUE: Failed\n"); + return r; + } + + return 0; +} + +static int umsch_mm_v4_0_set_regs(struct amdgpu_umsch_mm *umsch) +{ + struct amdgpu_device *adev = container_of(umsch, struct amdgpu_device, umsch_mm); + + umsch->rb_wptr = SOC15_REG_OFFSET(VCN, 0, regVCN_UMSCH_RB_WPTR); + umsch->rb_rptr = SOC15_REG_OFFSET(VCN, 0, regVCN_UMSCH_RB_RPTR); + + return 0; +} + +static const struct umsch_mm_funcs umsch_mm_v4_0_funcs = { + .set_hw_resources = umsch_mm_v4_0_set_hw_resources, + .add_queue = umsch_mm_v4_0_add_queue, + .remove_queue = umsch_mm_v4_0_remove_queue, + .set_regs = umsch_mm_v4_0_set_regs, + .init_microcode = amdgpu_umsch_mm_init_microcode, + .load_microcode = umsch_mm_v4_0_load_microcode, + .ring_init = amdgpu_umsch_mm_ring_init, + .ring_start = umsch_mm_v4_0_ring_start, + .ring_stop = umsch_mm_v4_0_ring_stop, +}; + +void umsch_mm_v4_0_set_funcs(struct amdgpu_umsch_mm *umsch) +{ + umsch->funcs = &umsch_mm_v4_0_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.h b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.h new file mode 100644 index 000000000..06bc0fa74 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __UMSCH_MM_V4_0_H__ +#define __UMSCH_MM_V4_0_H__ + +void umsch_mm_v4_0_set_funcs(struct amdgpu_umsch_mm *umsch); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c index 5534c769b..a6006f231 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c @@ -577,8 +577,6 @@ static int uvd_v3_1_sw_init(void *handle) ptr += ucode_len; memcpy(&adev->uvd.keyselect, ptr, 4); - r = amdgpu_uvd_entity_init(adev); - return r; } @@ -706,6 +704,13 @@ static int uvd_v3_1_hw_fini(void *handle) return 0; } +static int uvd_v3_1_prepare_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return amdgpu_uvd_prepare_suspend(adev); +} + static int uvd_v3_1_suspend(void *handle) { int r; @@ -806,6 +811,7 @@ static const struct amd_ip_funcs uvd_v3_1_ip_funcs = { .sw_fini = uvd_v3_1_sw_fini, .hw_init = uvd_v3_1_hw_init, .hw_fini = uvd_v3_1_hw_fini, + .prepare_suspend = uvd_v3_1_prepare_suspend, .suspend = uvd_v3_1_suspend, .resume = uvd_v3_1_resume, .is_idle = uvd_v3_1_is_idle, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index c108b8381..1aa09ad7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -127,8 +127,6 @@ static int uvd_v4_2_sw_init(void *handle) if (r) return r; - r = amdgpu_uvd_entity_init(adev); - return r; } @@ -220,6 +218,13 @@ static int uvd_v4_2_hw_fini(void *handle) return 0; } +static int uvd_v4_2_prepare_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return amdgpu_uvd_prepare_suspend(adev); +} + static int uvd_v4_2_suspend(void *handle) { int r; @@ -756,6 +761,7 @@ static const struct amd_ip_funcs uvd_v4_2_ip_funcs = { .sw_fini = uvd_v4_2_sw_fini, .hw_init = uvd_v4_2_hw_init, .hw_fini = uvd_v4_2_hw_fini, + .prepare_suspend = uvd_v4_2_prepare_suspend, .suspend = uvd_v4_2_suspend, .resume = uvd_v4_2_resume, .is_idle = uvd_v4_2_is_idle, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index d7e31e48a..f8b229b75 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -125,8 +125,6 @@ static int uvd_v5_0_sw_init(void *handle) if (r) return r; - r = amdgpu_uvd_entity_init(adev); - return r; } @@ -218,6 +216,13 @@ static int uvd_v5_0_hw_fini(void *handle) return 0; } +static int uvd_v5_0_prepare_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return amdgpu_uvd_prepare_suspend(adev); +} + static int uvd_v5_0_suspend(void *handle) { int r; @@ -863,6 +868,7 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = { .sw_fini = uvd_v5_0_sw_fini, .hw_init = uvd_v5_0_hw_init, .hw_fini = uvd_v5_0_hw_fini, + .prepare_suspend = uvd_v5_0_prepare_suspend, .suspend = uvd_v5_0_suspend, .resume = uvd_v5_0_resume, .is_idle = uvd_v5_0_is_idle, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 5fe872f4b..a9a6880f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -432,8 +432,6 @@ static int uvd_v6_0_sw_init(void *handle) } } - r = amdgpu_uvd_entity_init(adev); - return r; } @@ -542,6 +540,13 @@ static int uvd_v6_0_hw_fini(void *handle) return 0; } +static int uvd_v6_0_prepare_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return amdgpu_uvd_prepare_suspend(adev); +} + static int 
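The recurring hunk here adds a .prepare_suspend hook to each UVD version's amd_ip_funcs table. The core device code can invoke such a hook only when an IP block provides one, the usual optional-callback idiom; a compilable sketch with made-up names:

#include <stdio.h>

/* Per-IP-block operations; any hook may be left NULL. */
struct ip_funcs {
	const char *name;
	int (*prepare_suspend)(void *handle);
	int (*suspend)(void *handle);
};

static int uvd_prepare_suspend(void *handle)
{
	printf("uvd: prepare suspend\n");
	return 0;
}

static const struct ip_funcs uvd_funcs = {
	.name = "uvd",
	.prepare_suspend = uvd_prepare_suspend,
	/* .suspend intentionally omitted, so it reads as NULL */
};

int main(void)
{
	/* call a hook only if the block implements it */
	if (uvd_funcs.prepare_suspend)
		uvd_funcs.prepare_suspend(NULL);
	if (uvd_funcs.suspend)
		uvd_funcs.suspend(NULL);
	return 0;
}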
uvd_v6_0_suspend(void *handle) { int r; @@ -1528,6 +1533,7 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = { .sw_fini = uvd_v6_0_sw_fini, .hw_init = uvd_v6_0_hw_init, .hw_fini = uvd_v6_0_hw_fini, + .prepare_suspend = uvd_v6_0_prepare_suspend, .suspend = uvd_v6_0_suspend, .resume = uvd_v6_0_resume, .is_idle = uvd_v6_0_is_idle, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 86d1d46e1..6068b784d 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -480,10 +480,6 @@ static int uvd_v7_0_sw_init(void *handle) if (r) return r; - r = amdgpu_uvd_entity_init(adev); - if (r) - return r; - r = amdgpu_virt_alloc_mm_table(adev); if (r) return r; @@ -612,6 +608,13 @@ static int uvd_v7_0_hw_fini(void *handle) return 0; } +static int uvd_v7_0_prepare_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return amdgpu_uvd_prepare_suspend(adev); +} + static int uvd_v7_0_suspend(void *handle) { int r; @@ -1787,6 +1790,7 @@ const struct amd_ip_funcs uvd_v7_0_ip_funcs = { .sw_fini = uvd_v7_0_sw_fini, .hw_init = uvd_v7_0_hw_init, .hw_fini = uvd_v7_0_hw_fini, + .prepare_suspend = uvd_v7_0_prepare_suspend, .suspend = uvd_v7_0_suspend, .resume = uvd_v7_0_resume, .is_idle = NULL /* uvd_v7_0_is_idle */, diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 67eb01fef..a08e7abca 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -441,8 +441,6 @@ static int vce_v2_0_sw_init(void *handle) return r; } - r = amdgpu_vce_entity_init(adev); - return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 18f6e62af..f4760748d 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -450,8 +450,6 @@ static int vce_v3_0_sw_init(void *handle) return r; } - r = amdgpu_vce_entity_init(adev); - return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index e0b70cd3b..06d787385 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -486,11 +486,6 @@ static int vce_v4_0_sw_init(void *handle) return r; } - - r = amdgpu_vce_entity_init(adev); - if (r) - return r; - r = amdgpu_virt_alloc_mm_table(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 6fbea38f4..aba403d71 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -187,7 +187,7 @@ static int vcn_v2_5_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + (amdgpu_sriov_vf(adev) ? 2*j : 8*j); - if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) + if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(2, 5, 0)) ring->vm_hub = AMDGPU_MMHUB1(0); else ring->vm_hub = AMDGPU_MMHUB0(0); @@ -207,7 +207,8 @@ static int vcn_v2_5_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + (amdgpu_sriov_vf(adev) ? 
(1 + i + 2*j) : (2 + i + 8*j)); - if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) + if (amdgpu_ip_version(adev, UVD_HWIP, 0) == + IP_VERSION(2, 5, 0)) ring->vm_hub = AMDGPU_MMHUB1(0); else ring->vm_hub = AMDGPU_MMHUB0(0); @@ -794,7 +795,7 @@ static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx, { uint32_t tmp; - if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(2, 6, 0)) + if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(2, 6, 0)) return; tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK | @@ -1985,7 +1986,7 @@ static struct amdgpu_vcn_ras vcn_v2_6_ras = { static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev) { - switch (adev->ip_versions[VCN_HWIP][0]) { + switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) { case IP_VERSION(2, 6, 0): adev->vcn.ras = &vcn_v2_6_ras; break; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index a61ecefda..e02af4de5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -100,7 +100,8 @@ static int vcn_v3_0_early_init(void *handle) /* both instances are harvested, disable the block */ return -ENOENT; - if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 33)) + if (amdgpu_ip_version(adev, UVD_HWIP, 0) == + IP_VERSION(3, 0, 33)) adev->vcn.num_enc_rings = 0; else adev->vcn.num_enc_rings = 2; @@ -227,9 +228,10 @@ static int vcn_v3_0_sw_init(void *handle) cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB); fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED); fw_shared->present_flag_0 |= AMDGPU_VCN_SMU_VERSION_INFO_FLAG; - if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 2)) + if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 1, 2)) fw_shared->smu_interface_info.smu_interface_type = 2; - else if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 1)) + else if (amdgpu_ip_version(adev, UVD_HWIP, 0) == + IP_VERSION(3, 1, 1)) fw_shared->smu_interface_info.smu_interface_type = 1; if (amdgpu_vcnfw_log) @@ -1255,7 +1257,8 @@ static int vcn_v3_0_start(struct amdgpu_device *adev) fw_shared->rb.wptr = lower_32_bits(ring->wptr); fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); - if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) { + if (amdgpu_ip_version(adev, UVD_HWIP, 0) != + IP_VERSION(3, 0, 33)) { fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); ring = &adev->vcn.inst[i].ring_enc[0]; WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); @@ -1628,7 +1631,8 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev, UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); - if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) { + if (amdgpu_ip_version(adev, UVD_HWIP, 0) != + IP_VERSION(3, 0, 33)) { /* Restore */ fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index 29164289c..48bfcd0d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -169,15 +169,13 @@ static int vcn_v4_0_sw_init(void *handle) fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ? 
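The mechanical `adev->ip_versions[...][0]` to `amdgpu_ip_version(adev, ..., 0)` conversions in these hunks swap direct array access for an accessor, and the comparisons keep working because IP_VERSION() packs major/minor/revision into one integer. A self-contained sketch assuming the maj<<16 | min<<8 | rev layout used by the driver's macro:

#include <stdint.h>
#include <stdio.h>

/* Pack major/minor/revision into one comparable integer. */
#define TOY_IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))
#define TOY_IP_MAJ(v)  ((v) >> 16)
#define TOY_IP_MIN(v) (((v) >> 8) & 0xFF)
#define TOY_IP_REV(v)  ((v) & 0xFF)

int main(void)
{
	uint32_t v = TOY_IP_VERSION(2, 5, 0);

	printf("%u.%u.%u\n", (unsigned)TOY_IP_MAJ(v),
	       (unsigned)TOY_IP_MIN(v), (unsigned)TOY_IP_REV(v));
	/* equality and ordered range checks both work on the packed value */
	printf("is 2.5.0: %d\n", v == TOY_IP_VERSION(2, 5, 0));
	printf(">= 2.6.0: %d\n", v >= TOY_IP_VERSION(2, 6, 0));
	return 0;
}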
AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU; - if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 2)) { + if (amdgpu_ip_version(adev, VCN_HWIP, 0) == + IP_VERSION(4, 0, 2)) { fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT; fw_shared->drm_key_wa.method = AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING; } - if (amdgpu_sriov_vf(adev)) - fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG); - if (amdgpu_vcnfw_log) amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]); } @@ -876,8 +874,6 @@ static void vcn_v4_0_enable_clock_gating(struct amdgpu_device *adev, int inst) | UVD_SUVD_CGC_CTRL__IME_MODE_MASK | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data); - - return; } static void vcn_v4_0_enable_ras(struct amdgpu_device *adev, int inst_idx, @@ -1210,6 +1206,24 @@ static int vcn_v4_0_start(struct amdgpu_device *adev) return 0; } +static int vcn_v4_0_init_ring_metadata(struct amdgpu_device *adev, uint32_t vcn_inst, struct amdgpu_ring *ring_enc) +{ + struct amdgpu_vcn_rb_metadata *rb_metadata = NULL; + uint8_t *rb_ptr = (uint8_t *)ring_enc->ring; + + rb_ptr += ring_enc->ring_size; + rb_metadata = (struct amdgpu_vcn_rb_metadata *)rb_ptr; + + memset(rb_metadata, 0, sizeof(struct amdgpu_vcn_rb_metadata)); + rb_metadata->size = sizeof(struct amdgpu_vcn_rb_metadata); + rb_metadata->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG); + rb_metadata->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_DECOUPLE_FLAG); + rb_metadata->version = 1; + rb_metadata->ring_id = vcn_inst & 0xFF; + + return 0; +} + static int vcn_v4_0_start_sriov(struct amdgpu_device *adev) { int i; @@ -1332,11 +1346,30 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev) rb_enc_addr = ring_enc->gpu_addr; rb_setup->is_rb_enabled_flags |= RB_ENABLED; - rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr); - rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr); - rb_setup->rb_size = ring_enc->ring_size / 4; fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG); + if (amdgpu_sriov_is_vcn_rb_decouple(adev)) { + vcn_v4_0_init_ring_metadata(adev, i, ring_enc); + + memset((void *)&rb_setup->rb_info, 0, sizeof(struct amdgpu_vcn_rb_setup_info) * MAX_NUM_VCN_RB_SETUP); + if (!(adev->vcn.harvest_config & (1 << 0))) { + rb_setup->rb_info[0].rb_addr_lo = lower_32_bits(adev->vcn.inst[0].ring_enc[0].gpu_addr); + rb_setup->rb_info[0].rb_addr_hi = upper_32_bits(adev->vcn.inst[0].ring_enc[0].gpu_addr); + rb_setup->rb_info[0].rb_size = adev->vcn.inst[0].ring_enc[0].ring_size / 4; + } + if (!(adev->vcn.harvest_config & (1 << 1))) { + rb_setup->rb_info[2].rb_addr_lo = lower_32_bits(adev->vcn.inst[1].ring_enc[0].gpu_addr); + rb_setup->rb_info[2].rb_addr_hi = upper_32_bits(adev->vcn.inst[1].ring_enc[0].gpu_addr); + rb_setup->rb_info[2].rb_size = adev->vcn.inst[1].ring_enc[0].ring_size / 4; + } + fw_shared->decouple.is_enabled = 1; + fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_DECOUPLE_FLAG); + } else { + rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr); + rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr); + rb_setup->rb_size = ring_enc->ring_size / 4; + } + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr)); @@ -1808,6 +1841,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, .nop = VCN_ENC_CMD_NO_OP, + .extra_dw = sizeof(struct amdgpu_vcn_rb_metadata), 
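The new .extra_dw entry above reserves room past the ring contents in the same allocation, which is where vcn_v4_0_init_ring_metadata() parks its amdgpu_vcn_rb_metadata (note the rb_ptr += ring_enc->ring_size). A plain-malloc sketch of that carve-out:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for amdgpu_vcn_rb_metadata. */
struct toy_rb_metadata {
	uint32_t size;
	uint32_t present_flag_0;
	uint8_t  version;
	uint8_t  ring_id;
};

int main(void)
{
	size_t ring_size = 4096;
	/* one allocation: ring contents first, metadata appended */
	uint8_t *ring = malloc(ring_size + sizeof(struct toy_rb_metadata));
	struct toy_rb_metadata *md;

	if (!ring)
		return 1;
	md = (struct toy_rb_metadata *)(ring + ring_size);
	memset(md, 0, sizeof(*md));
	md->size = sizeof(*md);
	md->version = 1;
	md->ring_id = 0;
	printf("metadata at offset %zu\n", (size_t)((uint8_t *)md - ring));
	free(ring);
	return 0;
}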
.get_rptr = vcn_v4_0_unified_ring_get_rptr, .get_wptr = vcn_v4_0_unified_ring_get_wptr, .set_wptr = vcn_v4_0_unified_ring_set_wptr, @@ -1849,7 +1883,7 @@ static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev) if (adev->vcn.harvest_config & (1 << i)) continue; - if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 2)) + if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 2)) vcn_v4_0_unified_ring_vm_funcs.secure_submission_supported = true; adev->vcn.inst[i].ring_enc[0].funcs = @@ -2021,16 +2055,20 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_ { uint32_t ip_instance; - switch (entry->client_id) { - case SOC15_IH_CLIENTID_VCN: - ip_instance = 0; - break; - case SOC15_IH_CLIENTID_VCN1: - ip_instance = 1; - break; - default: - DRM_ERROR("Unhandled client id: %d\n", entry->client_id); - return 0; + if (amdgpu_sriov_is_vcn_rb_decouple(adev)) { + ip_instance = entry->ring_id; + } else { + switch (entry->client_id) { + case SOC15_IH_CLIENTID_VCN: + ip_instance = 0; + break; + case SOC15_IH_CLIENTID_VCN1: + ip_instance = 1; + break; + default: + DRM_ERROR("Unhandled client id: %d\n", entry->client_id); + return 0; + } } DRM_DEBUG("IH: VCN TRAP\n"); @@ -2156,7 +2194,7 @@ static struct amdgpu_vcn_ras vcn_v4_0_ras = { static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev) { - switch (adev->ip_versions[VCN_HWIP][0]) { + switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) { case IP_VERSION(4, 0, 0): adev->vcn.ras = &vcn_v4_0_ras; break; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index f85d18cd7..810bbfccd 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -1760,6 +1760,11 @@ static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev, SOC15_DPG_MODE_OFFSET(VCN, 0, regVCN_RAS_CNTL), tmp, 0, indirect); + tmp = UVD_VCPU_INT_EN2__RASCNTL_VCPU_VCODEC_EN_MASK; + WREG32_SOC15_DPG_MODE(inst_idx, + SOC15_DPG_MODE_OFFSET(VCN, 0, regUVD_VCPU_INT_EN2), + tmp, 0, indirect); + tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK; WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(VCN, 0, regUVD_SYS_INT_EN), diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c new file mode 100644 index 000000000..2eda30e78 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -0,0 +1,1779 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <linux/firmware.h> +#include "amdgpu.h" +#include "amdgpu_vcn.h" +#include "amdgpu_pm.h" +#include "amdgpu_cs.h" +#include "soc15.h" +#include "soc15d.h" +#include "soc15_hw_ip.h" +#include "vcn_v2_0.h" +#include "mmsch_v4_0.h" +#include "vcn_v4_0_5.h" + +#include "vcn/vcn_4_0_5_offset.h" +#include "vcn/vcn_4_0_5_sh_mask.h" +#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h" + +#include <drm/drm_drv.h> + +#define mmUVD_DPG_LMA_CTL regUVD_DPG_LMA_CTL +#define mmUVD_DPG_LMA_CTL_BASE_IDX regUVD_DPG_LMA_CTL_BASE_IDX +#define mmUVD_DPG_LMA_DATA regUVD_DPG_LMA_DATA +#define mmUVD_DPG_LMA_DATA_BASE_IDX regUVD_DPG_LMA_DATA_BASE_IDX + +#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00 +#define VCN1_VID_SOC_ADDRESS_3_0 0x48300 + +#define VCN_HARVEST_MMSCH 0 + +#define RDECODE_MSG_CREATE 0x00000000 +#define RDECODE_MESSAGE_CREATE 0x00000001 + +static int amdgpu_ih_clientid_vcns[] = { + SOC15_IH_CLIENTID_VCN, + SOC15_IH_CLIENTID_VCN1 +}; + +static void vcn_v4_0_5_set_unified_ring_funcs(struct amdgpu_device *adev); +static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev); +static int vcn_v4_0_5_set_powergating_state(void *handle, + enum amd_powergating_state state); +static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_device *adev, + int inst_idx, struct dpg_pause_state *new_state); +static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring); + +/** + * vcn_v4_0_5_early_init - set function pointers and load microcode + * + * @handle: amdgpu_device pointer + * + * Set ring and irq function pointers + * Load microcode from filesystem + */ +static int vcn_v4_0_5_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + /* re-use enc ring as unified ring */ + adev->vcn.num_enc_rings = 1; + vcn_v4_0_5_set_unified_ring_funcs(adev); + vcn_v4_0_5_set_irq_funcs(adev); + + return amdgpu_vcn_early_init(adev); +} + +/** + * vcn_v4_0_5_sw_init - sw init for VCN block + * + * @handle: amdgpu_device pointer + * + * Load firmware and sw initialization + */ +static int vcn_v4_0_5_sw_init(void *handle) +{ + struct amdgpu_ring *ring; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, r; + + r = amdgpu_vcn_sw_init(adev); + if (r) + return r; + + amdgpu_vcn_setup_ucode(adev); + + r = amdgpu_vcn_resume(adev); + if (r) + return r; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + volatile struct amdgpu_vcn4_fw_shared *fw_shared; + + if (adev->vcn.harvest_config & (1 << i)) + continue; + + atomic_set(&adev->vcn.inst[i].sched_score, 0); + + /* VCN UNIFIED TRAP */ + r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i], + VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq); + if (r) + return r; + + /* VCN POISON TRAP */ + r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i], + VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq); + if (r) + return r; + + ring = &adev->vcn.inst[i].ring_enc[0]; + ring->use_doorbell = true; + if (amdgpu_sriov_vf(adev)) + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + i * (adev->vcn.num_enc_rings + 1) + 1; + else + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 2 + 8 * i; + ring->vm_hub = AMDGPU_MMHUB0(0); + sprintf(ring->name, "vcn_unified_%d", i); + + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, 
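The doorbell arithmetic in sw_init gives each instance's unified ring its own slot: SR-IOV packs them densely at one slot per ring plus one, while bare metal keeps the legacy stride of eight slots per instance. Both formulas, lifted into a standalone helper with an invented base value:

#include <stdio.h>

/* Doorbell slot for instance i's unified ring, mirroring the two
 * formulas in sw_init; `base` stands in for (vcn_ring0_1 << 1). */
static int toy_doorbell(int base, int i, int num_enc_rings, int sriov)
{
	if (sriov)
		return base + i * (num_enc_rings + 1) + 1;
	return base + 2 + 8 * i;
}

int main(void)
{
	for (int i = 0; i < 2; i++)
		printf("inst %d: sriov=%d bare=%d\n", i,
		       toy_doorbell(16, i, 1, 1),
		       toy_doorbell(16, i, 1, 0));
	return 0;
}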
+ AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score); + if (r) + return r; + + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE); + fw_shared->sq.is_enabled = 1; + + fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG); + fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ? + AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU; + + if (amdgpu_sriov_vf(adev)) + fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG); + + if (amdgpu_vcnfw_log) + amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]); + } + + if (amdgpu_sriov_vf(adev)) { + r = amdgpu_virt_alloc_mm_table(adev); + if (r) + return r; + } + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + adev->vcn.pause_dpg_mode = vcn_v4_0_5_pause_dpg_mode; + + return 0; +} + +/** + * vcn_v4_0_5_sw_fini - sw fini for VCN block + * + * @handle: amdgpu_device pointer + * + * VCN suspend and free up sw allocation + */ +static int vcn_v4_0_5_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, r, idx; + + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + volatile struct amdgpu_vcn4_fw_shared *fw_shared; + + if (adev->vcn.harvest_config & (1 << i)) + continue; + + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + fw_shared->present_flag_0 = 0; + fw_shared->sq.is_enabled = 0; + } + + drm_dev_exit(idx); + } + + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_free_mm_table(adev); + + r = amdgpu_vcn_suspend(adev); + if (r) + return r; + + r = amdgpu_vcn_sw_fini(adev); + + return r; +} + +/** + * vcn_v4_0_5_hw_init - start and test VCN block + * + * @handle: amdgpu_device pointer + * + * Initialize the hardware, boot up the VCPU and do some testing + */ +static int vcn_v4_0_5_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring; + int i, r; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + ring = &adev->vcn.inst[i].ring_enc[0]; + + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i); + + r = amdgpu_ring_test_helper(ring); + if (r) + goto done; + } + +done: + if (!r) + DRM_INFO("VCN decode and encode initialized successfully(under %s).\n", + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode"); + + return r; +} + +/** + * vcn_v4_0_5_hw_fini - stop the hardware block + * + * @handle: amdgpu_device pointer + * + * Stop the VCN block, mark ring as not ready any more + */ +static int vcn_v4_0_5_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i; + + cancel_delayed_work_sync(&adev->vcn.idle_work); + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + if (!amdgpu_sriov_vf(adev)) { + if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || + (adev->vcn.cur_state != AMD_PG_STATE_GATE && + RREG32_SOC15(VCN, i, regUVD_STATUS))) { + vcn_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE); + } + } + + amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0); + } + + return 0; +} + +/** + * vcn_v4_0_5_suspend - suspend VCN block + * + * @handle: amdgpu_device pointer + * + * HW fini and suspend VCN block + */ +static int vcn_v4_0_5_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = 
vcn_v4_0_5_hw_fini(adev); + if (r) + return r; + + r = amdgpu_vcn_suspend(adev); + + return r; +} + +/** + * vcn_v4_0_5_resume - resume VCN block + * + * @handle: amdgpu_device pointer + * + * Resume firmware and hw init VCN block + */ +static int vcn_v4_0_5_resume(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_vcn_resume(adev); + if (r) + return r; + + r = vcn_v4_0_5_hw_init(adev); + + return r; +} + +/** + * vcn_v4_0_5_mc_resume - memory controller programming + * + * @adev: amdgpu_device pointer + * @inst: instance number + * + * Let the VCN memory controller know it's offsets + */ +static void vcn_v4_0_5_mc_resume(struct amdgpu_device *adev, int inst) +{ + uint32_t offset, size; + const struct common_firmware_header *hdr; + + hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); + + /* cache window 0: fw */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo)); + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi)); + WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0); + offset = 0; + } else { + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].gpu_addr)); + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].gpu_addr)); + offset = size; + WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + } + WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size); + + /* cache window 1: stack */ + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); + WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0); + WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); + + /* cache window 2: context */ + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0); + WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); + + /* non-cache window */ + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); + WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0); + WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0, + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared))); +} + +/** + * vcn_v4_0_5_mc_resume_dpg_mode - memory controller programming for dpg mode + * + * @adev: amdgpu_device pointer + * @inst_idx: instance number index + * @indirect: indirectly write sram + * + * Let the VCN memory controller know it's offsets with dpg mode + */ +static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) +{ + uint32_t offset, size; + const struct common_firmware_header *hdr; + + hdr = (const struct 
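vcn_v4_0_5_mc_resume() above slices one VCN allocation into consecutive windows: the firmware image (skipped when the PSP already placed it in TMR), then stack, then context, each programmed as a base/offset/size triple. The offset bookkeeping reduces to the following sketch; the sizes are illustrative stand-ins, not the real AMDGPU_VCN_* constants:

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE 4096
#define TOY_ALIGN(x) (((x) + TOY_PAGE - 1) & ~(uint64_t)(TOY_PAGE - 1))

int main(void)
{
	uint64_t gpu_addr = 0x100000;          /* base of the VCN BO */
	uint64_t fw_size = TOY_ALIGN(321 * 1024 + 8);
	uint64_t stack_size = 128 * 1024;      /* illustrative stack size */
	int psp_loaded = 0;
	/* with PSP load the fw lives in TMR, so window 0 starts at 0 */
	uint64_t offset = psp_loaded ? 0 : fw_size;

	printf("window0 (fw):      %#llx\n", (unsigned long long)gpu_addr);
	printf("window1 (stack):   %#llx\n",
	       (unsigned long long)(gpu_addr + offset));
	printf("window2 (context): %#llx\n",
	       (unsigned long long)(gpu_addr + offset + stack_size));
	return 0;
}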
common_firmware_header *)adev->vcn.fw->data; + size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); + + /* cache window 0: fw */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + if (!indirect) { + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), + 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), + 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + } else { + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + } + offset = 0; + } else { + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); + offset = size; + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), + AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect); + } + + if (!indirect) + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect); + else + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); + + /* cache window 1: stack */ + if (!indirect) { + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + } else { + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + } + + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); + + /* cache window 2: context */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), + 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), + 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( 
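Each WREG32_SOC15_DPG_MODE() call takes an `indirect` argument: when set, the (register, value) pair is not issued to MMIO but appended to the instance's DPG SRAM buffer, to be handed over in one batch later (see the amdgpu_vcn_psp_update_sram() call further down). Mechanically it is just a write recorder, roughly:

#include <stdint.h>
#include <stdio.h>

struct reg_write { uint32_t reg, val; };

static struct reg_write sram[64];   /* stands in for the DPG SRAM buffer */
static int sram_count;

static void toy_wreg(uint32_t reg, uint32_t val, int indirect)
{
	if (indirect) {
		/* queue the pair; it gets replayed as a batch later */
		sram[sram_count].reg = reg;
		sram[sram_count].val = val;
		sram_count++;
	} else {
		printf("mmio write %#x = %#x\n", reg, val);
	}
}

int main(void)
{
	toy_wreg(0x100, 1, 1);
	toy_wreg(0x104, 2, 1);
	printf("queued %d indirect writes for replay\n", sram_count);
	return 0;
}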
+ VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); + + /* non-cache window */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0), + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect); + + /* VCN global tiling registers */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); +} + +/** + * vcn_v4_0_5_disable_static_power_gating - disable VCN static power gating + * + * @adev: amdgpu_device pointer + * @inst: instance number + * + * Disable static power gating for VCN block + */ +static void vcn_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev, int inst) +{ + uint32_t data = 0; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, + 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0, + UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK); + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, + 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, + 1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT, + UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK); + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, + 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, + 1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT, + UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK); + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, + 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, + 1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT, + UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK); + } else { + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, + 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, + 0, UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK); + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, + 1 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, + 0, UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK); + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, + 1 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, + 0, UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK); + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, + 1 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, + 0, UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK); + } + + data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS); + data &= ~0x103; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) + data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | + UVD_POWER_STATUS__UVD_PG_EN_MASK; + WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data); +} + +/** + * 
vcn_v4_0_5_enable_static_power_gating - enable VCN static power gating + * + * @adev: amdgpu_device pointer + * @inst: instance number + * + * Enable static power gating for VCN block + */ +static void vcn_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev, int inst) +{ + uint32_t data; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { + /* Before power off, this indicator has to be turned on */ + data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS); + data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK; + data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF; + WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data); + + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, + 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, + 1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT, + UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK); + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, + 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, + 1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT, + UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK); + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, + 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, + 1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT, + UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK); + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, + 2 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, + 1 << UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS__SHIFT, + UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK); + } +} + +/** + * vcn_v4_0_5_disable_clock_gating - disable VCN clock gating + * + * @adev: amdgpu_device pointer + * @inst: instance number + * + * Disable clock gating for VCN block + */ +static void vcn_v4_0_5_disable_clock_gating(struct amdgpu_device *adev, int inst) +{ + uint32_t data; + + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) + return; + + /* VCN disable CGC */ + data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL); + data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; + data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data); + + data = RREG32_SOC15(VCN, inst, regUVD_CGC_GATE); + data &= ~(UVD_CGC_GATE__SYS_MASK + | UVD_CGC_GATE__UDEC_MASK + | UVD_CGC_GATE__MPEG2_MASK + | UVD_CGC_GATE__REGS_MASK + | UVD_CGC_GATE__RBC_MASK + | UVD_CGC_GATE__LMI_MC_MASK + | UVD_CGC_GATE__LMI_UMC_MASK + | UVD_CGC_GATE__IDCT_MASK + | UVD_CGC_GATE__MPRD_MASK + | UVD_CGC_GATE__MPC_MASK + | UVD_CGC_GATE__LBSI_MASK + | UVD_CGC_GATE__LRBBM_MASK + | UVD_CGC_GATE__UDEC_RE_MASK + | UVD_CGC_GATE__UDEC_CM_MASK + | UVD_CGC_GATE__UDEC_IT_MASK + | UVD_CGC_GATE__UDEC_DB_MASK + | UVD_CGC_GATE__UDEC_MP_MASK + | UVD_CGC_GATE__WCB_MASK + | UVD_CGC_GATE__VCPU_MASK + | UVD_CGC_GATE__MMSCH_MASK); + + WREG32_SOC15(VCN, inst, regUVD_CGC_GATE, data); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF); + + data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL); + data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK + | UVD_CGC_CTRL__UDEC_CM_MODE_MASK + | UVD_CGC_CTRL__UDEC_IT_MODE_MASK + | UVD_CGC_CTRL__UDEC_DB_MODE_MASK + | UVD_CGC_CTRL__UDEC_MP_MODE_MASK + | UVD_CGC_CTRL__SYS_MODE_MASK + | UVD_CGC_CTRL__UDEC_MODE_MASK + | UVD_CGC_CTRL__MPEG2_MODE_MASK + | UVD_CGC_CTRL__REGS_MODE_MASK + | UVD_CGC_CTRL__RBC_MODE_MASK + | UVD_CGC_CTRL__LMI_MC_MODE_MASK + | UVD_CGC_CTRL__LMI_UMC_MODE_MASK + | UVD_CGC_CTRL__IDCT_MODE_MASK + 
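Note that the power-gating sequences never assume a register write takes effect immediately: every ONO domain write is paired with SOC15_WAIT_ON_RREG, which polls a status register until the masked bits match an expected value or a retry budget runs out. The skeleton of such a helper, as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_status;   /* stands in for an MMIO status register */

static uint32_t toy_rreg(void) { return fake_status; }

/* Poll until (reg & mask) == expect, or give up after `tries`. */
static int toy_wait_on_reg(uint32_t expect, uint32_t mask, int tries)
{
	while (tries--) {
		if ((toy_rreg() & mask) == expect)
			return 0;
		/* the driver delays between reads here */
	}
	return -1;   /* -ETIMEDOUT equivalent */
}

int main(void)
{
	fake_status = 0x2;   /* pretend the domain powered up */
	printf("wait: %d\n", toy_wait_on_reg(0x2, 0x3, 10));
	return 0;
}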
| UVD_CGC_CTRL__MPRD_MODE_MASK + | UVD_CGC_CTRL__MPC_MODE_MASK + | UVD_CGC_CTRL__LBSI_MODE_MASK + | UVD_CGC_CTRL__LRBBM_MODE_MASK + | UVD_CGC_CTRL__WCB_MODE_MASK + | UVD_CGC_CTRL__VCPU_MODE_MASK + | UVD_CGC_CTRL__MMSCH_MODE_MASK); + WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data); + + data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_GATE); + data |= (UVD_SUVD_CGC_GATE__SRE_MASK + | UVD_SUVD_CGC_GATE__SIT_MASK + | UVD_SUVD_CGC_GATE__SMP_MASK + | UVD_SUVD_CGC_GATE__SCM_MASK + | UVD_SUVD_CGC_GATE__SDB_MASK + | UVD_SUVD_CGC_GATE__SRE_H264_MASK + | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK + | UVD_SUVD_CGC_GATE__SIT_H264_MASK + | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK + | UVD_SUVD_CGC_GATE__SCM_H264_MASK + | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK + | UVD_SUVD_CGC_GATE__SDB_H264_MASK + | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK + | UVD_SUVD_CGC_GATE__SCLR_MASK + | UVD_SUVD_CGC_GATE__UVD_SC_MASK + | UVD_SUVD_CGC_GATE__ENT_MASK + | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK + | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK + | UVD_SUVD_CGC_GATE__SITE_MASK + | UVD_SUVD_CGC_GATE__SRE_VP9_MASK + | UVD_SUVD_CGC_GATE__SCM_VP9_MASK + | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK + | UVD_SUVD_CGC_GATE__SDB_VP9_MASK + | UVD_SUVD_CGC_GATE__IME_HEVC_MASK); + WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_GATE, data); + + data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL); + data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK + | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK + | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK + | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK + | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK + | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK + | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK + | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK + | UVD_SUVD_CGC_CTRL__IME_MODE_MASK + | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); + WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data); +} + +/** + * vcn_v4_0_5_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode + * + * @adev: amdgpu_device pointer + * @sram_sel: sram select + * @inst_idx: instance number index + * @indirect: indirectly write sram + * + * Disable clock gating for VCN block with dpg mode + */ +static void vcn_v4_0_5_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel, + int inst_idx, uint8_t indirect) +{ + uint32_t reg_data = 0; + + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) + return; + + /* enable sw clock gating control */ + reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | + UVD_CGC_CTRL__UDEC_CM_MODE_MASK | + UVD_CGC_CTRL__UDEC_IT_MODE_MASK | + UVD_CGC_CTRL__UDEC_DB_MODE_MASK | + UVD_CGC_CTRL__UDEC_MP_MODE_MASK | + UVD_CGC_CTRL__SYS_MODE_MASK | + UVD_CGC_CTRL__UDEC_MODE_MASK | + UVD_CGC_CTRL__MPEG2_MODE_MASK | + UVD_CGC_CTRL__REGS_MODE_MASK | + UVD_CGC_CTRL__RBC_MODE_MASK | + UVD_CGC_CTRL__LMI_MC_MODE_MASK | + UVD_CGC_CTRL__LMI_UMC_MODE_MASK | + UVD_CGC_CTRL__IDCT_MODE_MASK | + UVD_CGC_CTRL__MPRD_MODE_MASK | + UVD_CGC_CTRL__MPC_MODE_MASK | + UVD_CGC_CTRL__LBSI_MODE_MASK | + UVD_CGC_CTRL__LRBBM_MODE_MASK | + UVD_CGC_CTRL__WCB_MODE_MASK | + UVD_CGC_CTRL__VCPU_MODE_MASK); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_CGC_CTRL), reg_data, sram_sel, indirect); + + /* turn off clock gating */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_CGC_GATE), 0, sram_sel, indirect); + + /* turn on SUVD clock gating */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_SUVD_CGC_GATE), 1, 
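The clock-gating code in this stretch is all read-modify-write: read the register, clear the per-engine MODE bits with a combined mask, OR in the new values, write back. The WREG32_P()-style helper used elsewhere in the file boils down to this, with a toy variable in place of MMIO:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0xFFFFFFFF;

/* Keep only the bits in `keep_mask`, then OR in `val`
 * (callers pass ~field_mask as keep_mask, as with WREG32_P). */
static void toy_wreg_p(uint32_t val, uint32_t keep_mask)
{
	fake_reg = (fake_reg & keep_mask) | val;
}

int main(void)
{
	uint32_t field_mask = 0x000000F0;

	toy_wreg_p(0x30, ~field_mask);   /* set the 4-bit field to 0x3 */
	printf("reg = %#x\n", fake_reg); /* prints 0xffffff3f */
	return 0;
}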
sram_sel, indirect); + + /* turn on sw mode in UVD_SUVD_CGC_CTRL */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect); +} + +/** + * vcn_v4_0_5_enable_clock_gating - enable VCN clock gating + * + * @adev: amdgpu_device pointer + * @inst: instance number + * + * Enable clock gating for VCN block + */ +static void vcn_v4_0_5_enable_clock_gating(struct amdgpu_device *adev, int inst) +{ + uint32_t data; + + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) + return; + + /* enable VCN CGC */ + data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL); + data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data); + + data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL); + data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK + | UVD_CGC_CTRL__UDEC_CM_MODE_MASK + | UVD_CGC_CTRL__UDEC_IT_MODE_MASK + | UVD_CGC_CTRL__UDEC_DB_MODE_MASK + | UVD_CGC_CTRL__UDEC_MP_MODE_MASK + | UVD_CGC_CTRL__SYS_MODE_MASK + | UVD_CGC_CTRL__UDEC_MODE_MASK + | UVD_CGC_CTRL__MPEG2_MODE_MASK + | UVD_CGC_CTRL__REGS_MODE_MASK + | UVD_CGC_CTRL__RBC_MODE_MASK + | UVD_CGC_CTRL__LMI_MC_MODE_MASK + | UVD_CGC_CTRL__LMI_UMC_MODE_MASK + | UVD_CGC_CTRL__IDCT_MODE_MASK + | UVD_CGC_CTRL__MPRD_MODE_MASK + | UVD_CGC_CTRL__MPC_MODE_MASK + | UVD_CGC_CTRL__LBSI_MODE_MASK + | UVD_CGC_CTRL__LRBBM_MODE_MASK + | UVD_CGC_CTRL__WCB_MODE_MASK + | UVD_CGC_CTRL__VCPU_MODE_MASK + | UVD_CGC_CTRL__MMSCH_MODE_MASK); + WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data); + + data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL); + data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK + | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK + | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK + | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK + | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK + | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK + | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK + | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK + | UVD_SUVD_CGC_CTRL__IME_MODE_MASK + | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); + WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data); +} + +/** + * vcn_v4_0_5_start_dpg_mode - VCN start with dpg mode + * + * @adev: amdgpu_device pointer + * @inst_idx: instance number index + * @indirect: indirectly write sram + * + * Start VCN block with dpg mode + */ +static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) +{ + volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + struct amdgpu_ring *ring; + uint32_t tmp; + + /* disable register anti-hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1, + ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + /* enable dynamic power gating mode */ + tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS); + tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK; + tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK; + WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp); + + if (indirect) + adev->vcn.inst[inst_idx].dpg_sram_curr_addr = + (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr; + + /* enable clock gating */ + vcn_v4_0_5_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect); + + /* enable VCPU clock */ + tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); + tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK; + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect); + + /* disable master interrupt */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + 
VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect); + + /* setup regUVD_LMI_CTRL */ + tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + UVD_LMI_CTRL__CRC_RESET_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + 0x00100000L); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect); + + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_MPC_CNTL), + 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect); + + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_MPC_SET_MUXA0), + ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect); + + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_MPC_SET_MUXB0), + ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect); + + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_MPC_SET_MUX), + ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | + (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect); + + vcn_v4_0_5_mc_resume_dpg_mode(adev, inst_idx, indirect); + + tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); + tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect); + + /* enable LMI MC and UMC channels */ + tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT; + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect); + + /* enable master interrupt */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); + + + if (indirect) + amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); + + ring = &adev->vcn.inst[inst_idx].ring_enc[0]; + + WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4); + + tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE); + tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK); + WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp); + fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET; + WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0); + WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0); + + tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR); + WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp); + ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR); + + tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE); + tmp |= VCN_RB_ENABLE__RB1_EN_MASK; + WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp); + fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); + + WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL, + ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | + VCN_RB1_DB_CTRL__EN_MASK); + + return 0; +} + + +/** + * vcn_v4_0_5_start - VCN start + * + * @adev: amdgpu_device pointer + * + * Start VCN block + */ +static int vcn_v4_0_5_start(struct amdgpu_device *adev) +{ + volatile struct amdgpu_vcn4_fw_shared 
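The ring programming at the tail of start_dpg_mode follows a strict order: disable the ring, flag FW_QUEUE_RING_RESET for the firmware, zero RPTR/WPTR, resync the cached write pointer from the hardware read pointer so the ring reads as empty, and only then re-enable. Condensed into a standalone model:

#include <stdint.h>
#include <stdio.h>

struct toy_ring {
	uint32_t rptr, wptr;
	int enabled;
};

static void toy_ring_reset(struct toy_ring *rb)
{
	rb->enabled = 0;          /* VCN_RB_ENABLE cleared first */
	rb->rptr = 0;             /* then both pointers zeroed */
	rb->wptr = 0;
	rb->wptr = rb->rptr;      /* wptr resynced from rptr: ring is empty */
	rb->enabled = 1;          /* finally re-enable */
}

int main(void)
{
	struct toy_ring rb = { .rptr = 52, .wptr = 80, .enabled = 1 };

	toy_ring_reset(&rb);
	printf("enabled=%d rptr=%u wptr=%u (empty: %d)\n",
	       rb.enabled, rb.rptr, rb.wptr, rb.rptr == rb.wptr);
	return 0;
}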
*fw_shared; + struct amdgpu_ring *ring; + uint32_t tmp; + int i, j, k, r; + + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, true); + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + r = vcn_v4_0_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram); + continue; + } + + /* disable VCN power gating */ + vcn_v4_0_5_disable_static_power_gating(adev, i); + + /* set VCN status busy */ + tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY; + WREG32_SOC15(VCN, i, regUVD_STATUS, tmp); + + /*SW clock gating */ + vcn_v4_0_5_disable_clock_gating(adev, i); + + /* enable VCPU clock */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK); + + /* disable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* enable LMI MC and UMC channels */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + + tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET); + tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; + tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; + WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp); + + /* setup regUVD_LMI_CTRL */ + tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL); + WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); + + /* setup regUVD_MPC_CNTL */ + tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL); + tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK; + tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT; + WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp); + + /* setup UVD_MPC_SET_MUXA0 */ + WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0, + ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT))); + + /* setup UVD_MPC_SET_MUXB0 */ + WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0, + ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT))); + + /* setup UVD_MPC_SET_MUX */ + WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX, + ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | + (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT))); + + vcn_v4_0_5_mc_resume(adev, i); + + /* VCN global tiling registers */ + WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + /* unblock VCPU register access */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + + /* release VCPU reset to boot */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + for (j = 0; j < 10; ++j) { + uint32_t status; + + for (k = 0; k < 100; ++k) { + status = RREG32_SOC15(VCN, i, regUVD_STATUS); + if (status & 2) + break; + mdelay(10); + if (amdgpu_emu_mode == 1) + msleep(1); + } + + if (amdgpu_emu_mode == 1) { + r = -1; + if (status & 2) { + r = 0; + break; + } + } else { + r = 0; + if (status & 2) + break; + + dev_err(adev->dev, + "VCN[%d] is not responding, trying to reset VCPU!!!\n", i); + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + mdelay(10); + 
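This boot loop is a bounded retry: up to 10 attempts, each polling UVD_STATUS up to 100 times, with a VCPU block-reset pulse between failed attempts. The generic shape, with invented poll/reset stand-ins:

#include <stdio.h>

static int boots_needed = 2;   /* pretend the VCPU comes up on try 3 */

static int toy_poll_ready(void)
{
	return boots_needed == 0;
}

static void toy_reset_vcpu(void)
{
	if (boots_needed)
		boots_needed--;
	printf("pulsing VCPU block reset\n");
}

int main(void)
{
	int attempt, ready = 0;

	for (attempt = 0; attempt < 10 && !ready; attempt++) {
		for (int k = 0; k < 100; k++)
			if ((ready = toy_poll_ready()))
				break;
		if (!ready)
			toy_reset_vcpu();   /* recover, then retry */
	}
	printf("ready=%d after %d attempt(s)\n", ready, attempt);
	return 0;
}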
WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + mdelay(10); + r = -1; + } + } + + if (r) { + dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i); + return r; + } + + /* enable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* clear the busy bit of VCN_STATUS */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0, + ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + + ring = &adev->vcn.inst[i].ring_enc[0]; + WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL, + ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | + VCN_RB1_DB_CTRL__EN_MASK); + + WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4); + + tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE); + tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK); + WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp); + fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET; + WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0); + WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0); + + tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR); + WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp); + ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR); + + tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE); + tmp |= VCN_RB_ENABLE__RB1_EN_MASK; + WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp); + fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); + } + + return 0; +} + +/** + * vcn_v4_0_5_stop_dpg_mode - VCN stop with dpg mode + * + * @adev: amdgpu_device pointer + * @inst_idx: instance number index + * + * Stop VCN block with dpg mode + */ +static void vcn_v4_0_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) +{ + uint32_t tmp; + + /* Wait for power status to be 1 */ + SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + /* wait for read ptr to be equal to write ptr */ + tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR); + SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF); + + SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + /* disable dynamic power gating mode */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0, + ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); +} + +/** + * vcn_v4_0_5_stop - VCN stop + * + * @adev: amdgpu_device pointer + * + * Stop VCN block + */ +static int vcn_v4_0_5_stop(struct amdgpu_device *adev) +{ + volatile struct amdgpu_vcn4_fw_shared *fw_shared; + uint32_t tmp; + int i, r = 0; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + vcn_v4_0_5_stop_dpg_mode(adev, i); + continue; + } + + /* wait for vcn idle */ + r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7); + if (r) + return r; + + tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK | + UVD_LMI_STATUS__READ_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; + r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp); + if (r) + return r; + + /* disable LMI UMC channel */ + tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2); + tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK; + WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp); + tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK | + UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; + r = 
SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp); + if (r) + return r; + + /* block VCPU register access */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), + UVD_RB_ARB_CTRL__VCPU_DIS_MASK, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + + /* reset VCPU */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + /* disable VCPU clock */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0, + ~(UVD_VCPU_CNTL__CLK_EN_MASK)); + + /* apply soft reset */ + tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET); + tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; + WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp); + tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET); + tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; + WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp); + + /* clear status */ + WREG32_SOC15(VCN, i, regUVD_STATUS, 0); + + /* apply HW clock gating */ + vcn_v4_0_5_enable_clock_gating(adev, i); + + /* enable VCN power gating */ + vcn_v4_0_5_enable_static_power_gating(adev, i); + } + + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, false); + + return 0; +} + +/** + * vcn_v4_0_5_pause_dpg_mode - VCN pause with dpg mode + * + * @adev: amdgpu_device pointer + * @inst_idx: instance number index + * @new_state: pause state + * + * Pause dpg mode for VCN block + */ +static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx, + struct dpg_pause_state *new_state) +{ + uint32_t reg_data = 0; + int ret_code; + + /* pause/unpause if state is changed */ + if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) { + DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d", + adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based); + reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) & + (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + + if (new_state->fw_based == VCN_DPG_STATE__PAUSE) { + ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + if (!ret_code) { + /* pause DPG */ + reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data); + + /* wait for ACK */ + SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + + SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, + UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + } + } else { + /* unpause dpg, no need to wait */ + reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data); + } + adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based; + } + + return 0; +} + +/** + * vcn_v4_0_5_unified_ring_get_rptr - get unified read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware unified read pointer + */ +static uint64_t vcn_v4_0_5_unified_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) + DRM_ERROR("wrong ring id is identified in %s", __func__); + + return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR); +} + +/** + * vcn_v4_0_5_unified_ring_get_wptr - get unified write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware unified write pointer + */ +static uint64_t vcn_v4_0_5_unified_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) 
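vcn_v4_0_5_pause_dpg_mode() above is a request/ack handshake gated on cached state: do nothing when the state is unchanged, otherwise raise the request bit, spin until the firmware acks, and record the new state (unpausing needs no ack). A minimal self-contained model:

#include <stdint.h>
#include <stdio.h>

#define REQ (1u << 0)
#define ACK (1u << 1)

static uint32_t pause_reg;
static int cur_state;

static uint32_t toy_read(void)
{
	/* fake firmware: ack as soon as the request bit is seen */
	if (pause_reg & REQ)
		pause_reg |= ACK;
	return pause_reg;
}

static int toy_set_pause(int new_state)
{
	if (cur_state == new_state)
		return 0;               /* nothing to do */
	if (new_state) {
		pause_reg |= REQ;       /* request the pause */
		while (!(toy_read() & ACK))
			;               /* wait for the ack */
	} else {
		pause_reg &= ~REQ;      /* unpause needs no ack */
	}
	cur_state = new_state;
	return 0;
}

int main(void)
{
	toy_set_pause(1);
	printf("paused, reg=%#x\n", pause_reg);
	toy_set_pause(0);
	printf("unpaused, reg=%#x\n", pause_reg);
	return 0;
}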
+ DRM_ERROR("wrong ring id is identified in %s", __func__); + + if (ring->use_doorbell) + return *ring->wptr_cpu_addr; + else + return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR); +} + +/** + * vcn_v4_0_5_unified_ring_set_wptr - set enc write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the enc write pointer to the hardware + */ +static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) + DRM_ERROR("wrong ring id is identified in %s", __func__); + + if (ring->use_doorbell) { + *ring->wptr_cpu_addr = lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr)); + } +} + +static int vcn_v4_0_5_limit_sched(struct amdgpu_cs_parser *p, + struct amdgpu_job *job) +{ + struct drm_gpu_scheduler **scheds; + + /* The create msg must be in the first IB submitted */ + if (atomic_read(&job->base.entity->fence_seq)) + return -EINVAL; + + /* if VCN0 is harvested, we can't support AV1 */ + if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) + return -EINVAL; + + scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC] + [AMDGPU_RING_PRIO_0].sched; + drm_sched_entity_modify_sched(job->base.entity, scheds, 1); + return 0; +} + +static int vcn_v4_0_5_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, + uint64_t addr) +{ + struct ttm_operation_ctx ctx = { false, false }; + struct amdgpu_bo_va_mapping *map; + uint32_t *msg, num_buffers; + struct amdgpu_bo *bo; + uint64_t start, end; + unsigned int i; + void *ptr; + int r; + + addr &= AMDGPU_GMC_HOLE_MASK; + r = amdgpu_cs_find_mapping(p, addr, &bo, &map); + if (r) { + DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr); + return r; + } + + start = map->start * AMDGPU_GPU_PAGE_SIZE; + end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE; + if (addr & 0x7) { + DRM_ERROR("VCN messages must be 8 byte aligned!\n"); + return -EINVAL; + } + + bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (r) { + DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r); + return r; + } + + r = amdgpu_bo_kmap(bo, &ptr); + if (r) { + DRM_ERROR("Failed mapping the VCN message (%d)!\n", r); + return r; + } + + msg = ptr + addr - start; + + /* Check length */ + if (msg[1] > end - addr) { + r = -EINVAL; + goto out; + } + + if (msg[3] != RDECODE_MSG_CREATE) + goto out; + + num_buffers = msg[2]; + for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) { + uint32_t offset, size, *create; + + if (msg[0] != RDECODE_MESSAGE_CREATE) + continue; + + offset = msg[1]; + size = msg[2]; + + if (offset + size > end) { + r = -EINVAL; + goto out; + } + + create = ptr + addr + offset - start; + + /* H264, HEVC and VP9 can run on any instance */ + if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11) + continue; + + r = vcn_v4_0_5_limit_sched(p, job); + if (r) + goto out; + } + +out: + amdgpu_bo_kunmap(bo); + return r; +} + +#define RADEON_VCN_ENGINE_TYPE_ENCODE (0x00000002) +#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003) + +#define RADEON_VCN_ENGINE_INFO (0x30000001) +#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET 16 + +#define RENCODE_ENCODE_STANDARD_AV1 2 +#define RENCODE_IB_PARAM_SESSION_INIT 0x00000003 +#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET 64 + +/* return the offset in ib if id is found, -1 otherwise + * to speed 
up the search we only scan up to max_offset
+ */
+static int vcn_v4_0_5_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset)
+{
+	int i;
+
+	/* each package starts with its size in bytes, followed by the package id */
+	for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) {
+		if (ib->ptr[i + 1] == id)
+			return i;
+	}
+	return -1;
+}
+
+static int vcn_v4_0_5_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+					     struct amdgpu_job *job,
+					     struct amdgpu_ib *ib)
+{
+	struct amdgpu_ring *ring = amdgpu_job_ring(job);
+	struct amdgpu_vcn_decode_buffer *decode_buffer;
+	uint64_t addr;
+	uint32_t val;
+	int idx;
+
+	/* The first instance can decode anything */
+	if (!ring->me)
+		return 0;
+
+	/* RADEON_VCN_ENGINE_INFO is at the top of the ib block */
+	idx = vcn_v4_0_5_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO,
+			RADEON_VCN_ENGINE_INFO_MAX_OFFSET);
+	if (idx < 0) /* engine info is missing */
+		return 0;
+
+	val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
+	if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
+		decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
+
+		if (!(decode_buffer->valid_buf_flag & 0x1))
+			return 0;
+
+		addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
+			decode_buffer->msg_buffer_address_lo;
+		return vcn_v4_0_5_dec_msg(p, job, addr);
+	} else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
+		idx = vcn_v4_0_5_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT,
+				RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET);
+		if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1)
+			return vcn_v4_0_5_limit_sched(p, job);
+	}
+	return 0;
+}
+
+static const struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
+	.type = AMDGPU_RING_TYPE_VCN_ENC,
+	.align_mask = 0x3f,
+	.nop = VCN_ENC_CMD_NO_OP,
+	.get_rptr = vcn_v4_0_5_unified_ring_get_rptr,
+	.get_wptr = vcn_v4_0_5_unified_ring_get_wptr,
+	.set_wptr = vcn_v4_0_5_unified_ring_set_wptr,
+	.patch_cs_in_place = vcn_v4_0_5_ring_patch_cs_in_place,
+	.emit_frame_size =
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
+		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
+		1, /* vcn_v2_0_enc_ring_insert_end */
+	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
+	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
+	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
+	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
+	.test_ring = amdgpu_vcn_enc_ring_test_ring,
+	.test_ib = amdgpu_vcn_unified_ring_test_ib,
+	.insert_nop = amdgpu_ring_insert_nop,
+	.insert_end = vcn_v2_0_enc_ring_insert_end,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.begin_use = amdgpu_vcn_ring_begin_use,
+	.end_use = amdgpu_vcn_ring_end_use,
+	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
+	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+/**
+ * vcn_v4_0_5_set_unified_ring_funcs - set unified ring functions
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set unified ring functions
+ */
+static void vcn_v4_0_5_set_unified_ring_funcs(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		if (adev->vcn.harvest_config & (1 << i))
+			continue;
+
+		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_5_unified_ring_vm_funcs;
+		adev->vcn.inst[i].ring_enc[0].me = i;
+
+		DRM_INFO("VCN(%d) encode/decode are enabled in VM mode\n", i);
+	}
+}
+
+/**
+ * vcn_v4_0_5_is_idle - check whether VCN block is idle
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Check whether VCN block is idle
+ */
+static bool vcn_v4_0_5_is_idle(void
*handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i, ret = 1;
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		if (adev->vcn.harvest_config & (1 << i))
+			continue;
+
+		ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
+	}
+
+	return ret;
+}
+
+/**
+ * vcn_v4_0_5_wait_for_idle - wait for VCN block idle
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Wait for VCN block idle
+ */
+static int vcn_v4_0_5_wait_for_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i, ret = 0;
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		if (adev->vcn.harvest_config & (1 << i))
+			continue;
+
+		ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
+				UVD_STATUS__IDLE);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+/**
+ * vcn_v4_0_5_set_clockgating_state - set VCN block clockgating state
+ *
+ * @handle: amdgpu_device pointer
+ * @state: clock gating state
+ *
+ * Set VCN block clockgating state
+ */
+static int vcn_v4_0_5_set_clockgating_state(void *handle, enum amd_clockgating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool enable = (state == AMD_CG_STATE_GATE);
+	int i;
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		if (adev->vcn.harvest_config & (1 << i))
+			continue;
+
+		if (enable) {
+			if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
+				return -EBUSY;
+			vcn_v4_0_5_enable_clock_gating(adev, i);
+		} else {
+			vcn_v4_0_5_disable_clock_gating(adev, i);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * vcn_v4_0_5_set_powergating_state - set VCN block powergating state
+ *
+ * @handle: amdgpu_device pointer
+ * @state: power gating state
+ *
+ * Set VCN block powergating state
+ */
+static int vcn_v4_0_5_set_powergating_state(void *handle, enum amd_powergating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret;
+
+	if (state == adev->vcn.cur_state)
+		return 0;
+
+	if (state == AMD_PG_STATE_GATE)
+		ret = vcn_v4_0_5_stop(adev);
+	else
+		ret = vcn_v4_0_5_start(adev);
+
+	if (!ret)
+		adev->vcn.cur_state = state;
+
+	return ret;
+}
+
+/**
+ * vcn_v4_0_5_set_interrupt_state - set VCN block interrupt state
+ *
+ * @adev: amdgpu_device pointer
+ * @source: interrupt sources
+ * @type: interrupt types
+ * @state: interrupt states
+ *
+ * Set VCN block interrupt state
+ */
+static int vcn_v4_0_5_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
+		unsigned type, enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
+/**
+ * vcn_v4_0_5_process_interrupt - process VCN block interrupt
+ *
+ * @adev: amdgpu_device pointer
+ * @source: interrupt sources
+ * @entry: interrupt entry from clients and sources
+ *
+ * Process VCN block interrupt
+ */
+static int vcn_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
+		struct amdgpu_iv_entry *entry)
+{
+	uint32_t ip_instance;
+
+	switch (entry->client_id) {
+	case SOC15_IH_CLIENTID_VCN:
+		ip_instance = 0;
+		break;
+	default:
+		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
+		return 0;
+	}
+
+	DRM_DEBUG("IH: VCN TRAP\n");
+
+	switch (entry->src_id) {
+	case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
+		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
+		break;
+	case VCN_4_0__SRCID_UVD_POISON:
+		amdgpu_vcn_process_poison_irq(adev, source, entry);
+		break;
+	default:
+		DRM_ERROR("Unhandled interrupt: %d %d\n",
+			  entry->src_id, entry->src_data[0]);
+		break;
+	}
+
+	return 0;
+}
+
+static const struct
amdgpu_irq_src_funcs vcn_v4_0_5_irq_funcs = { + .set = vcn_v4_0_5_set_interrupt_state, + .process = vcn_v4_0_5_process_interrupt, +}; + +/** + * vcn_v4_0_5_set_irq_funcs - set VCN block interrupt irq functions + * + * @adev: amdgpu_device pointer + * + * Set VCN block interrupt irq functions + */ +static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; + adev->vcn.inst[i].irq.funcs = &vcn_v4_0_5_irq_funcs; + } +} + +static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = { + .name = "vcn_v4_0_5", + .early_init = vcn_v4_0_5_early_init, + .late_init = NULL, + .sw_init = vcn_v4_0_5_sw_init, + .sw_fini = vcn_v4_0_5_sw_fini, + .hw_init = vcn_v4_0_5_hw_init, + .hw_fini = vcn_v4_0_5_hw_fini, + .suspend = vcn_v4_0_5_suspend, + .resume = vcn_v4_0_5_resume, + .is_idle = vcn_v4_0_5_is_idle, + .wait_for_idle = vcn_v4_0_5_wait_for_idle, + .check_soft_reset = NULL, + .pre_soft_reset = NULL, + .soft_reset = NULL, + .post_soft_reset = NULL, + .set_clockgating_state = vcn_v4_0_5_set_clockgating_state, + .set_powergating_state = vcn_v4_0_5_set_powergating_state, +}; + +const struct amdgpu_ip_block_version vcn_v4_0_5_ip_block = { + .type = AMD_IP_BLOCK_TYPE_VCN, + .major = 4, + .minor = 0, + .rev = 5, + .funcs = &vcn_v4_0_5_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.h b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.h new file mode 100644 index 000000000..ff9b3d6f6 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.h @@ -0,0 +1,35 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __VCN_V4_0_5_H__ +#define __VCN_V4_0_5_H__ + +enum amdgpu_vcn_v4_0_5_sub_block { + AMDGPU_VCN_V4_0_5_VCPU_VCODEC = 0, + + AMDGPU_VCN_V4_0_5_MAX_SUB_BLOCK, +}; + +extern const struct amdgpu_ip_block_version vcn_v4_0_5_ip_block; + +#endif /* __VCN_V4_0_5_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index d364c6dd1..bf68e18e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -373,6 +373,12 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + out: return (wptr & ih->ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c index dbc995364..db66e6ccc 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c @@ -291,7 +291,7 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev) adev->nbio.funcs->ih_control(adev); - if ((adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 2, 1)) && + if ((amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 2, 1)) && adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN); if (adev->irq.ih.use_bus_addr) { @@ -304,8 +304,8 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev) /* psp firmware won't program IH_CHICKEN for aldebaran * driver needs to program it properly according to * MC_SPACE type in IH_RB_CNTL */ - if ((adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0)) || - (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2))) { + if ((amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 0)) || + (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2))) { ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN); if (adev->irq.ih.use_bus_addr) { ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, @@ -334,8 +334,8 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev) vega20_setup_retry_doorbell(adev->irq.retry_cam_doorbell_index)); /* Enable IH Retry CAM */ - if (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0) || - adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2)) + if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 0) || + amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2)) WREG32_FIELD15(OSSSYS, 0, IH_RETRY_INT_CAM_CNTL_ALDEBARAN, ENABLE, 1); else @@ -421,6 +421,12 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. 
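+	 * Leaving the bit set would keep the overflow flag cleared, so a
+	 * subsequent ring overflow could go unnoticed by the driver.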
+ */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + out: return (wptr & ih->ptr_mask); } @@ -537,7 +543,7 @@ static int vega20_ih_sw_init(void *handle) return r; if ((adev->flags & AMD_IS_APU) && - (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2))) + (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2))) use_bus_addr = false; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr); @@ -554,7 +560,7 @@ static int vega20_ih_sw_init(void *handle) adev->irq.ih1.use_doorbell = true; adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1; - if (adev->ip_versions[OSSSYS_HWIP][0] != IP_VERSION(4, 4, 2)) { + if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) != IP_VERSION(4, 4, 2)) { r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index fe8ba9e98..1a9881298 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1124,11 +1124,10 @@ static void vi_program_aspm(struct amdgpu_device *adev) bool bL1SS = false; bool bClkReqSupport = true; - if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_pcie_dynamic_switching_supported()) + if (!amdgpu_device_should_use_aspm(adev)) return; - if (adev->flags & AMD_IS_APU || - adev->asic_type < CHIP_POLARIS10) + if (adev->asic_type < CHIP_POLARIS10) return; orig = data = RREG32_PCIE(ixPCIE_LC_CNTL); diff --git a/drivers/gpu/drm/amd/amdgpu/vpe_6_1_fw_if.h b/drivers/gpu/drm/amd/amdgpu/vpe_6_1_fw_if.h new file mode 100644 index 000000000..9b550deb4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vpe_6_1_fw_if.h @@ -0,0 +1,217 @@ +/* Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __VPE_6_1_FW_IF_H_ +#define __VPE_6_1_FW_IF_H_ + +/**************** + * VPE OP Codes + ****************/ +enum VPE_CMD_OPCODE { + VPE_CMD_OPCODE_NOP = 0x0, + VPE_CMD_OPCODE_VPE_DESC = 0x1, + VPE_CMD_OPCODE_PLANE_CFG = 0x2, + VPE_CMD_OPCODE_VPEP_CFG = 0x3, + VPE_CMD_OPCODE_INDIRECT = 0x4, + VPE_CMD_OPCODE_FENCE = 0x5, + VPE_CMD_OPCODE_TRAP = 0x6, + VPE_CMD_OPCODE_REG_WRITE = 0x7, + VPE_CMD_OPCODE_POLL_REGMEM = 0x8, + VPE_CMD_OPCODE_COND_EXE = 0x9, + VPE_CMD_OPCODE_ATOMIC = 0xA, + VPE_CMD_OPCODE_PLANE_FILL = 0xB, + VPE_CMD_OPCODE_TIMESTAMP = 0xD +}; + +/** Generic Command Header + * Generic Commands include: + * Noop, Fence, Trap, + * RegisterWrite, PollRegisterWriteMemory, + * SetLocalTimestamp, GetLocalTimestamp + * GetGlobalGPUTimestamp */ +#define VPE_HEADER_SUB_OPCODE__SHIFT 8 +#define VPE_HEADER_SUB_OPCODE_MASK 0x0000FF00 +#define VPE_HEADER_OPCODE__SHIFT 0 +#define VPE_HEADER_OPCODE_MASK 0x000000FF + +#define VPE_CMD_HEADER(op, subop) \ + (((subop << VPE_HEADER_SUB_OPCODE__SHIFT) & VPE_HEADER_SUB_OPCODE_MASK) | \ + ((op << VPE_HEADER_OPCODE__SHIFT) & VPE_HEADER_OPCODE_MASK)) + + + /*************************** + * VPE NOP + ***************************/ +#define VPE_CMD_NOP_HEADER_COUNT__SHIFT 16 +#define VPE_CMD_NOP_HEADER_COUNT_MASK 0x00003FFF + +#define VPE_CMD_NOP_HEADER_COUNT(count) \ + (((count) & VPE_CMD_NOP_HEADER_COUNT_MASK) << VPE_CMD_NOP_HEADER_COUNT__SHIFT) + + /*************************** + * VPE Descriptor + ***************************/ +#define VPE_DESC_CD__SHIFT 16 +#define VPE_DESC_CD_MASK 0x000F0000 + +#define VPE_DESC_CMD_HEADER(cd) \ + (VPE_CMD_HEADER(VPE_CMD_OPCODE_VPE_DESC, 0) | \ + (((cd) << VPE_DESC_CD__SHIFT) & VPE_DESC_CD_MASK)) + + /*************************** + * VPE Plane Config + ***************************/ +enum VPE_PLANE_CFG_SUBOP { + VPE_PLANE_CFG_SUBOP_1_TO_1 = 0x0, + VPE_PLANE_CFG_SUBOP_2_TO_1 = 0x1, + VPE_PLANE_CFG_SUBOP_2_TO_2 = 0x2 +}; + +#define VPE_PLANE_CFG_ONE_PLANE 0 +#define VPE_PLANE_CFG_TWO_PLANES 1 + +#define VPE_PLANE_CFG_NPS0__SHIFT 16 +#define VPE_PLANE_CFG_NPS0_MASK 0x00030000 + +#define VPE_PLANE_CFG_NPD0__SHIFT 18 +#define VPE_PLANE_CFG_NPD0_MASK 0x000C0000 + +#define VPE_PLANE_CFG_NPS1__SHIFT 20 +#define VPE_PLANE_CFG_NPS1_MASK 0x00300000 + +#define VPE_PLANE_CFG_NPD1__SHIFT 22 +#define VPE_PLANE_CFG_NPD1_MASK 0x00C00000 + +#define VPE_PLANE_CFG_TMZ__SHIFT 16 +#define VPE_PLANE_CFG_TMZ_MASK 0x00010000 + +#define VPE_PLANE_CFG_SWIZZLE_MODE__SHIFT 3 +#define VPE_PLANE_CFG_SWIZZLE_MODE_MASK 0x000000F8 + +#define VPE_PLANE_CFG_ROTATION__SHIFT 0 +#define VPE_PLANE_CFG_ROTATION_MASK 0x00000003 + +#define VPE_PLANE_ADDR_LO__SHIFT 0 +#define VPE_PLANE_ADDR_LO_MASK 0xFFFFFF00 + +#define VPE_PLANE_CFG_PITCH__SHIFT 0 +#define VPE_PLANE_CFG_PITCH_MASK 0x00003FFF + +#define VPE_PLANE_CFG_VIEWPORT_Y__SHIFT 16 +#define VPE_PLANE_CFG_VIEWPORT_Y_MASK 0x3FFF0000 +#define VPE_PLANE_CFG_VIEWPORT_X__SHIFT 0 +#define VPE_PLANE_CFG_VIEWPORT_X_MASK 0x00003FFF + + +#define VPE_PLANE_CFG_VIEWPORT_HEIGHT__SHIFT 16 +#define VPE_PLANE_CFG_VIEWPORT_HEIGHT_MASK 0x1FFF0000 +#define VPE_PLANE_CFG_VIEWPORT_ELEMENT_SIZE__SHIFT 13 +#define VPE_PLANE_CFG_VIEWPORT_ELEMENT_SIZE_MASK 0x0000E000 +#define VPE_PLANE_CFG_VIEWPORT_WIDTH__SHIFT 0 +#define VPE_PLANE_CFG_VIEWPORT_WIDTH_MASK 0x00001FFF + +enum VPE_PLANE_CFG_ELEMENT_SIZE { + VPE_PLANE_CFG_ELEMENT_SIZE_8BPE = 0, + VPE_PLANE_CFG_ELEMENT_SIZE_16BPE = 1, + VPE_PLANE_CFG_ELEMENT_SIZE_32BPE = 2, + VPE_PLANE_CFG_ELEMENT_SIZE_64BPE = 3 +}; + +#define VPE_PLANE_CFG_CMD_HEADER(subop, 
nps0, npd0, nps1, npd1) \
+	(VPE_CMD_HEADER(VPE_CMD_OPCODE_PLANE_CFG, subop) | \
+	(((nps0) << VPE_PLANE_CFG_NPS0__SHIFT) & VPE_PLANE_CFG_NPS0_MASK) | \
+	(((npd0) << VPE_PLANE_CFG_NPD0__SHIFT) & VPE_PLANE_CFG_NPD0_MASK) | \
+	(((nps1) << VPE_PLANE_CFG_NPS1__SHIFT) & VPE_PLANE_CFG_NPS1_MASK) | \
+	(((npd1) << VPE_PLANE_CFG_NPD1__SHIFT) & VPE_PLANE_CFG_NPD1_MASK))
+
+
+/************************
+ * VPEP Config
+ ************************/
+enum VPE_VPEP_CFG_SUBOP {
+	VPE_VPEP_CFG_SUBOP_DIR_CFG = 0x0,
+	VPE_VPEP_CFG_SUBOP_IND_CFG = 0x1
+};
+
+
+// Direct Config Command Header
+#define VPE_DIR_CFG_HEADER_ARRAY_SIZE__SHIFT 16
+#define VPE_DIR_CFG_HEADER_ARRAY_SIZE_MASK 0xFFFF0000
+
+#define VPE_DIR_CFG_CMD_HEADER(subop, arr_sz) \
+	(VPE_CMD_HEADER(VPE_CMD_OPCODE_VPEP_CFG, subop) | \
+	(((arr_sz) << VPE_DIR_CFG_HEADER_ARRAY_SIZE__SHIFT) & VPE_DIR_CFG_HEADER_ARRAY_SIZE_MASK))
+
+
+#define VPE_DIR_CFG_PKT_REGISTER_OFFSET__SHIFT 2
+#define VPE_DIR_CFG_PKT_REGISTER_OFFSET_MASK 0x000FFFFC
+
+#define VPE_DIR_CFG_PKT_DATA_SIZE__SHIFT 20
+#define VPE_DIR_CFG_PKT_DATA_SIZE_MASK 0xFFF00000
+
+
+// Indirect Config Command Header
+#define VPE_IND_CFG_HEADER_NUM_DST__SHIFT 28
+#define VPE_IND_CFG_HEADER_NUM_DST_MASK 0xF0000000
+
+#define VPE_IND_CFG_CMD_HEADER(subop, num_dst) \
+	(VPE_CMD_HEADER(VPE_CMD_OPCODE_VPEP_CFG, subop) | \
+	(((num_dst) << VPE_IND_CFG_HEADER_NUM_DST__SHIFT) & VPE_IND_CFG_HEADER_NUM_DST_MASK))
+
+// Indirect Buffer Command Header
+#define VPE_CMD_INDIRECT_HEADER_VMID__SHIFT 16
+#define VPE_CMD_INDIRECT_HEADER_VMID_MASK 0x0000000F
+#define VPE_CMD_INDIRECT_HEADER_VMID(vmid) \
+	(((vmid) & VPE_CMD_INDIRECT_HEADER_VMID_MASK) << VPE_CMD_INDIRECT_HEADER_VMID__SHIFT)
+
+
+/**************************
+ * Poll Reg/Mem Sub-OpCode
+ **************************/
+enum VPE_POLL_REGMEM_SUBOP {
+	VPE_POLL_REGMEM_SUBOP_REGMEM = 0x0,
+	VPE_POLL_REGMEM_SUBOP_REGMEM_WRITE = 0x1
+};
+
+#define VPE_CMD_POLL_REGMEM_HEADER_FUNC__SHIFT 28
+#define VPE_CMD_POLL_REGMEM_HEADER_FUNC_MASK 0x00000007
+#define VPE_CMD_POLL_REGMEM_HEADER_FUNC(func) \
+	(((func) & VPE_CMD_POLL_REGMEM_HEADER_FUNC_MASK) << VPE_CMD_POLL_REGMEM_HEADER_FUNC__SHIFT)
+
+#define VPE_CMD_POLL_REGMEM_HEADER_MEM__SHIFT 31
+#define VPE_CMD_POLL_REGMEM_HEADER_MEM_MASK 0x00000001
+#define VPE_CMD_POLL_REGMEM_HEADER_MEM(mem) \
+	(((mem) & VPE_CMD_POLL_REGMEM_HEADER_MEM_MASK) << VPE_CMD_POLL_REGMEM_HEADER_MEM__SHIFT)
+
+#define VPE_CMD_POLL_REGMEM_DW5_INTERVAL__SHIFT 0
+#define VPE_CMD_POLL_REGMEM_DW5_INTERVAL_MASK 0x0000FFFF
+#define VPE_CMD_POLL_REGMEM_DW5_INTERVAL(interval) \
+	(((interval) & VPE_CMD_POLL_REGMEM_DW5_INTERVAL_MASK) << VPE_CMD_POLL_REGMEM_DW5_INTERVAL__SHIFT)
+
+#define VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT__SHIFT 16
+#define VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT_MASK 0x00000FFF
+#define VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(count) \
+	(((count) & VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT_MASK) << VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT__SHIFT)
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
new file mode 100644
index 000000000..174f13eff
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+#include <drm/drm_drv.h>
+
+#include "amdgpu.h"
+#include "amdgpu_ucode.h"
+#include "amdgpu_vpe.h"
+#include "vpe_v6_1.h"
+#include "soc15_common.h"
+#include "ivsrcid/vpe/irqsrcs_vpe_6_1.h"
+#include "vpe/vpe_6_1_0_offset.h"
+#include "vpe/vpe_6_1_0_sh_mask.h"
+
+MODULE_FIRMWARE("amdgpu/vpe_6_1_0.bin");
+
+#define VPE_THREAD1_UCODE_OFFSET	0x8000
+
+static uint32_t vpe_v6_1_get_reg_offset(struct amdgpu_vpe *vpe, uint32_t inst, uint32_t offset)
+{
+	uint32_t base;
+
+	base = vpe->ring.adev->reg_offset[VPE_HWIP][0][0];
+
+	return base + offset;
+}
+
+static void vpe_v6_1_halt(struct amdgpu_vpe *vpe, bool halt)
+{
+	struct amdgpu_device *adev = vpe->ring.adev;
+	uint32_t f32_cntl;
+
+	f32_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_F32_CNTL));
+	f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, HALT, halt ? 1 : 0);
+	f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, TH1_RESET, halt ?
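+			/* TH1_RESET is asserted together with HALT so VPE thread 1 stays in reset while halted */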
1 : 0); + WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_F32_CNTL), f32_cntl); +} + +static int vpe_v6_1_irq_init(struct amdgpu_vpe *vpe) +{ + struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe); + int ret; + + ret = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VPE, + VPE_6_1_SRCID__VPE_TRAP, + &adev->vpe.trap_irq); + if (ret) + return ret; + + return 0; +} + +static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe) +{ + struct amdgpu_device *adev = vpe->ring.adev; + const struct vpe_firmware_header_v1_0 *vpe_hdr; + const __le32 *data; + uint32_t ucode_offset[2], ucode_size[2]; + uint32_t i, size_dw; + uint32_t ret; + + // disable UMSCH_INT_ENABLE + ret = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL)); + ret = REG_SET_FIELD(ret, VPEC_CNTL, UMSCH_INT_ENABLE, 0); + WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL), ret); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + uint32_t f32_offset, f32_cntl; + + f32_offset = vpe_get_reg_offset(vpe, 0, regVPEC_F32_CNTL); + f32_cntl = RREG32(f32_offset); + f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, HALT, 0); + f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, TH1_RESET, 0); + + adev->vpe.cmdbuf_cpu_addr[0] = f32_offset; + adev->vpe.cmdbuf_cpu_addr[1] = f32_cntl; + + amdgpu_vpe_psp_update_sram(adev); + return 0; + } + + vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data; + + /* Thread 0(command thread) ucode offset/size */ + ucode_offset[0] = le32_to_cpu(vpe_hdr->header.ucode_array_offset_bytes); + ucode_size[0] = le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes); + /* Thread 1(control thread) ucode offset/size */ + ucode_offset[1] = le32_to_cpu(vpe_hdr->ctl_ucode_offset); + ucode_size[1] = le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes); + + vpe_v6_1_halt(vpe, true); + + for (i = 0; i < 2; i++) { + if (i > 0) + WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_UCODE_ADDR), VPE_THREAD1_UCODE_OFFSET); + else + WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_UCODE_ADDR), 0); + + data = (const __le32 *)(adev->vpe.fw->data + ucode_offset[i]); + size_dw = ucode_size[i] / sizeof(__le32); + + while (size_dw--) { + if (amdgpu_emu_mode && size_dw % 500 == 0) + msleep(1); + WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_UCODE_DATA), le32_to_cpup(data++)); + } + + } + + vpe_v6_1_halt(vpe, false); + + return 0; +} + +static int vpe_v6_1_ring_start(struct amdgpu_vpe *vpe) +{ + struct amdgpu_ring *ring = &vpe->ring; + struct amdgpu_device *adev = ring->adev; + uint32_t rb_bufsz, rb_cntl; + uint32_t ib_cntl; + uint32_t doorbell, doorbell_offset; + int ret; + + rb_bufsz = order_base_2(ring->ring_size / 4); + rb_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_CNTL)); + rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz); + rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_PRIV, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_VMID, 0); + WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_CNTL), rb_cntl); + + WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_RPTR), 0); + WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_RPTR_HI), 0); + WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_WPTR), 0); + WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_WPTR_HI), 0); + + WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_RPTR_ADDR_LO), + lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); + WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_RPTR_ADDR_HI), + upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); + + WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_BASE), ring->gpu_addr >> 8); + 
WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
+
+	ring->wptr = 0;
+
+	/* before programming wptr to a smaller value, set minor_ptr_update first */
+	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_MINOR_PTR_UPDATE), 1);
+
+	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
+	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
+
+	/* set minor_ptr_update to 0 after wptr is programmed */
+	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_MINOR_PTR_UPDATE), 0);
+
+	doorbell = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_DOORBELL));
+	doorbell_offset = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_DOORBELL_OFFSET));
+
+	doorbell = REG_SET_FIELD(doorbell, VPEC_QUEUE0_DOORBELL, ENABLE, ring->use_doorbell ? 1 : 0);
+	doorbell_offset = REG_SET_FIELD(doorbell_offset, VPEC_QUEUE0_DOORBELL_OFFSET, OFFSET, ring->doorbell_index);
+
+	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_DOORBELL), doorbell);
+	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
+
+	adev->nbio.funcs->vpe_doorbell_range(adev, 0, ring->use_doorbell, ring->doorbell_index, 2);
+
+	rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+	rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_ENABLE, 1);
+	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_CNTL), rb_cntl);
+
+	ib_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_IB_CNTL));
+	ib_cntl = REG_SET_FIELD(ib_cntl, VPEC_QUEUE0_IB_CNTL, IB_ENABLE, 1);
+	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_IB_CNTL), ib_cntl);
+
+	ring->sched.ready = true;
+
+	ret = amdgpu_ring_test_helper(ring);
+	if (ret) {
+		ring->sched.ready = false;
+		return ret;
+	}
+
+	return 0;
+}
+
+static int vpe_v_6_1_ring_stop(struct amdgpu_vpe *vpe)
+{
+	struct amdgpu_device *adev = vpe->ring.adev;
+	uint32_t queue_reset;
+	int ret;
+
+	queue_reset = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE_RESET_REQ));
+	queue_reset = REG_SET_FIELD(queue_reset, VPEC_QUEUE_RESET_REQ, QUEUE0_RESET, 1);
+	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE_RESET_REQ), queue_reset);
+
+	ret = SOC15_WAIT_ON_RREG(VPE, 0, regVPEC_QUEUE_RESET_REQ, 0,
+			VPEC_QUEUE_RESET_REQ__QUEUE0_RESET_MASK);
+	if (ret)
+		dev_err(adev->dev, "VPE queue reset failed\n");
+
+	vpe->ring.sched.ready = false;
+
+	return ret;
+}
+
+static int vpe_v6_1_set_trap_irq_state(struct amdgpu_device *adev,
+				       struct amdgpu_irq_src *source,
+				       unsigned int type,
+				       enum amdgpu_interrupt_state state)
+{
+	struct amdgpu_vpe *vpe = &adev->vpe;
+	uint32_t vpe_cntl;
+
+	vpe_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL));
+	vpe_cntl = REG_SET_FIELD(vpe_cntl, VPEC_CNTL, TRAP_ENABLE,
+				 state == AMDGPU_IRQ_STATE_ENABLE ?
1 : 0); + WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL), vpe_cntl); + + return 0; +} + +static int vpe_v6_1_process_trap_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + + dev_dbg(adev->dev, "IH: VPE trap\n"); + + switch (entry->client_id) { + case SOC21_IH_CLIENTID_VPE: + amdgpu_fence_process(&adev->vpe.ring); + break; + default: + break; + } + + return 0; +} + +static int vpe_v6_1_set_regs(struct amdgpu_vpe *vpe) +{ + vpe->regs.queue0_rb_rptr_lo = regVPEC_QUEUE0_RB_RPTR; + vpe->regs.queue0_rb_rptr_hi = regVPEC_QUEUE0_RB_RPTR_HI; + vpe->regs.queue0_rb_wptr_lo = regVPEC_QUEUE0_RB_WPTR; + vpe->regs.queue0_rb_wptr_hi = regVPEC_QUEUE0_RB_WPTR_HI; + vpe->regs.queue0_preempt = regVPEC_QUEUE0_PREEMPT; + + return 0; +} + +static const struct vpe_funcs vpe_v6_1_funcs = { + .get_reg_offset = vpe_v6_1_get_reg_offset, + .set_regs = vpe_v6_1_set_regs, + .irq_init = vpe_v6_1_irq_init, + .init_microcode = amdgpu_vpe_init_microcode, + .load_microcode = vpe_v6_1_load_microcode, + .ring_init = amdgpu_vpe_ring_init, + .ring_start = vpe_v6_1_ring_start, + .ring_stop = vpe_v_6_1_ring_stop, + .ring_fini = amdgpu_vpe_ring_fini, +}; + +static const struct amdgpu_irq_src_funcs vpe_v6_1_trap_irq_funcs = { + .set = vpe_v6_1_set_trap_irq_state, + .process = vpe_v6_1_process_trap_irq, +}; + +void vpe_v6_1_set_funcs(struct amdgpu_vpe *vpe) +{ + vpe->funcs = &vpe_v6_1_funcs; + vpe->trap_irq.funcs = &vpe_v6_1_trap_irq_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.h b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.h new file mode 100644 index 000000000..a9bea7905 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.h @@ -0,0 +1,29 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef __VPE_V6_1_H__ +#define __VPE_V6_1_H__ + +#include "amdgpu_vpe.h" + +void vpe_v6_1_set_funcs(struct amdgpu_vpe *vpe); + +#endif -- cgit v1.2.3